// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_iunlink_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

struct kmem_cache *xfs_inode_cache;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
	struct xfs_inode *);

/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract the CoW extent size hint from an inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
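
/*
 * Worked example (illustrative numbers only): with a per-inode CoW hint
 * of 64 blocks and a regular extent size hint of 16 blocks,
 * xfs_get_cowextsz_hint() returns 64; with both hints unset it falls
 * back to XFS_DEFAULT_COWEXTSZ_HINT.
 */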

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
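
/*
 * Minimal usage sketch (illustrative only): the returned mode must be
 * handed back to xfs_iunlock() unchanged, because the caller cannot know
 * whether the shared or the exclusive lock was taken:
 *
 *	uint lock_mode = xfs_ilock_data_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock(ip, lock_mode);
 */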

/*
 * You can't set both SHARED and EXCL for the same lock,
 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
 * to set in lock_flags.
 */
static inline void
xfs_lock_flags_assert(
	uint		lock_flags)
{
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);
}

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> invalidate_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
 * can fault in pages during copy in/out (for buffered IO) or require the
 * mmap_lock in get_user_pages() to map the user pages into the kernel address
 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
 * fault because page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
 * both taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				  XFS_MMAPLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				 XFS_MMAPLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
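
/*
 * Illustrative sketch of the ordering rule above (not a real caller): a
 * path that must invalidate the page cache takes both rw locks, in order,
 * before the ilock, and may drop them all in one call:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	... invalidate page cache, manipulate extents ...
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... modify and log the inode core ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL);
 */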

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}
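
/*
 * Typical trylock pattern (sketch; see xfs_release() below for a real
 * caller): bail out rather than block when the lock is contended:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
 *		return 0;	// skip the optional work
 *	... do work ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 */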

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
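
/*
 * Demotion sketch (illustrative): a writer that has finished the phase
 * requiring exclusion can let readers in without fully dropping the lock:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	... exclusive phase ...
 *	xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	... shared phase ...
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 */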

#if defined(DEBUG) || defined(XFS_WARN)
static inline bool
__xfs_rwsem_islocked(
	struct rw_semaphore	*rwsem,
	bool			shared)
{
	if (!debug_locks)
		return rwsem_is_locked(rwsem);

	if (!shared)
		return lockdep_is_held_type(rwsem, 0);

	/*
	 * We are checking that the lock is held at least in shared
	 * mode but don't care that it might be held exclusively
	 * (i.e. shared | excl). Hence we check if the lock is held
	 * in any mode rather than an explicit shared mode.
	 */
	return lockdep_is_held_type(rwsem, -1);
}

bool
xfs_isilocked(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
				(lock_flags & XFS_MMAPLOCK_SHARED));
	}

	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
				(lock_flags & XFS_IOLOCK_SHARED));
	}

	ASSERT(0);
	return false;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline uint
xfs_lock_inumorder(
	uint	lock_mode,
	uint	subclass)
{
	uint	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
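
/*
 * Worked example (illustrative): locking the third inode of an ordered
 * set (subclass 2) with XFS_ILOCK_EXCL yields a lock_mode whose subclass
 * bits encode 2 << XFS_ILOCK_SHIFT, so lockdep treats each inode in the
 * set as a distinct lock class:
 *
 *	xfs_ilock(ips[2], xfs_lock_inumorder(XFS_ILOCK_EXCL, 2));
 */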

/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0;
	uint			i;
	int			j;
	bool			try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

again:
	try_lock = false;
	i = 0;
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock = true;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		goto again;
	}
}
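
/*
 * Usage note (sketch): callers such as the rename path collect up to four
 * inodes (the two directories plus the source and target inodes), sort
 * the array by inode number, then take the ilocks in one go:
 *
 *	xfs_lock_inodes(ips, num_inodes, XFS_ILOCK_EXCL);
 */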

/*
 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
 * mmaplock must be double-locked separately since we use i_rwsem and
 * invalidate_lock for that. We now support taking one lock EXCL and the
 * other SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		swap(ip0, ip1);
		swap(ip0_mode, ip1_mode);
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}
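
/*
 * Sketch of a typical call (illustrative): both inodes end up ilocked
 * exclusively regardless of the order the caller passes them in, since
 * the function sorts by inode number internally:
 *
 *	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
 */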

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	uint			flags = 0;

	if (ip->i_diflags & XFS_DIFLAG_ANY) {
		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (ip->i_diflags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (ip->i_diflags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (xfs_inode_has_attr_fork(ip))
		flags |= FS_XFLAG_HASATTR;
	return flags;
}

/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	struct xfs_inode	*dp,
	const struct xfs_name	*name,
	struct xfs_inode	**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (xfs_is_shutdown(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	xfs_failaddr_t		failaddr;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_extsize = pip->i_extsize;
		}
		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_has_realtime(ip->i_mount))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_extsize = pip->i_extsize;
		}
	}
	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_diflags |= di_flags;

	/*
	 * Inode verifiers on older kernels only check that the extent size
	 * hint is an integer multiple of the rt extent size on realtime files.
	 * They did not check the hint alignment on a directory with both
	 * rtinherit and extszinherit flags set.  If the misaligned hint is
	 * propagated from a directory into a new realtime file, new file
	 * allocations will fail due to math errors in the rt allocator and/or
	 * trip the verifiers.  Validate the hint settings in the new file so
	 * that we don't let broken hints propagate.
	 */
	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
			VFS_I(ip)->i_mode, ip->i_diflags);
	if (failaddr) {
		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
				   XFS_DIFLAG_EXTSZINHERIT);
		ip->i_extsize = 0;
	}
}

/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	xfs_failaddr_t		failaddr;

	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = pip->i_cowextsize;
	}
	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
		ip->i_diflags2 |= XFS_DIFLAG2_DAX;

	/* Don't let invalid cowextsize hints propagate. */
	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
	if (failaddr) {
		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = 0;
	}
}

/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 */
int
xfs_init_new_inode(
	struct mnt_idmap	*idmap,
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	xfs_ino_t		ino,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	struct inode		*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	unsigned int		flags;
	int			error;
	struct timespec64	tv;
	struct inode		*inode;

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
		inode_fsuid_set(inode, idmap);
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(idmap, inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit && (inode->i_mode & S_ISGID) &&
	    !vfsgid_in_group_p(i_gid_into_vfsgid(idmap, inode)))
		inode->i_mode &= ~S_ISGID;

	ip->i_disk_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_nblocks == 0);

	tv = inode_set_ctime_current(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;

	ip->i_extsize = 0;
	ip->i_diflags = 0;

	if (xfs_has_v3inodes(mp)) {
		inode_set_iversion(inode, 1);
		ip->i_cowextsize = 0;
		ip->i_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		fallthrough;
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * If we need to create attributes immediately after allocating the
	 * inode, initialise an empty attribute fork right now. We use the
	 * default fork offset for attributes here as we don't know exactly what
	 * size or how many attributes we might be adding. We can do this
	 * safely here because we know the data fork is completely empty and
	 * this saves us from needing to run a separate transaction to set the
	 * fork offset in the immediate future.
	 */
	if (init_xattrs && xfs_has_attr(mp)) {
		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
		xfs_ifork_init_attr(ip, XFS_DINODE_FMT_EXTENTS, 0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
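
/*
 * Usage note (sketch): xfs_bumplink() pairs with operations that add a
 * directory entry for an existing inode, e.g. xfs_create() below bumps
 * the parent for a new subdirectory's ".." entry and xfs_link() bumps
 * the hard-linked inode.  xfs_droplink() is its mirror on the unlink
 * side and parks the inode on the AGI unlinked list once i_nlink
 * reaches zero.
 */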

int
xfs_create(
	struct mnt_idmap	*idmap,
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	bool			init_xattrs,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	trace_xfs_create(dp, name);

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
			mapped_fsgid(idmap, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
				resblks, &tp);
	}
	if (error)
		goto out_release_dquots;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to it, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The IDs of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_create_tmpfile(
	struct mnt_idmap	*idmap,
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(idmap, &init_user_ns),
			mapped_fsgid(idmap, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_release_dquots;

	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(idmap, tp, dp, ino, mode,
				0, 0, prid, false, &ip);
	if (error)
		goto out_trans_cancel;

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The IDs of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error, nospace_error = 0;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
			&tp, &nospace_error);
	if (error)
		goto std_return;

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_projid != sip->i_projid)) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle initial link state of O_TMPFILE inode
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
		error = xfs_iunlink_remove(tp, pag, sip);
		xfs_perag_put(pag);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	return error;
}

/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*dfork;
	struct xfs_ifork	*cfork;

	if (!xfs_is_reflink_inode(ip))
		return;
	dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	if (cfork->if_bytes == 0)
		xfs_inode_clear_cowblocks_tag(ip);
}

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents_flags(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_fileoff_t		first_unmap_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	flags |= xfs_bmapi_aflag(whichfork);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.
	 *
	 * We have to free all the blocks to the bmbt maximum offset, even if
	 * the page cache can't scale that far.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
		return 0;
	}

	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
	while (unmap_len > 0) {
		ASSERT(tp->t_highest_agno == NULLAGNUMBER);
		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
				flags, XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out;
	}

	if (whichfork == XFS_DATA_FORK) {
		/* Remove all pending CoW reservations. */
		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
				first_unmap_block, XFS_MAX_FILEOFF, true);
		if (error)
			goto out;

		xfs_itruncate_clear_reflink_flags(ip);
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
}
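
/*
 * Caller pattern (sketch; xfs_inactive_truncate() below is a concrete
 * example): allocate a permanent-reservation transaction, ilock and join
 * the inode, then commit whatever transaction comes back:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	if (!error)
 *		error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */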
1403 
1404 int
xfs_release(xfs_inode_t * ip)1405 xfs_release(
1406 	xfs_inode_t	*ip)
1407 {
1408 	xfs_mount_t	*mp = ip->i_mount;
1409 	int		error = 0;
1410 
1411 	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1412 		return 0;
1413 
1414 	/* If this is a read-only mount, don't do this (would generate I/O) */
1415 	if (xfs_is_readonly(mp))
1416 		return 0;
1417 
1418 	if (!xfs_is_shutdown(mp)) {
1419 		int truncated;
1420 
1421 		/*
1422 		 * If we previously truncated this file and removed old data
1423 		 * in the process, we want to initiate "early" writeout on
1424 		 * the last close.  This is an attempt to combat the notorious
1425 		 * NULL files problem which is particularly noticeable from a
1426 		 * truncate down, buffered (re-)write (delalloc), followed by
1427 		 * a crash.  What we are effectively doing here is
1428 		 * significantly reducing the time window where we'd otherwise
1429 		 * be exposed to that problem.
1430 		 */
1431 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1432 		if (truncated) {
1433 			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1434 			if (ip->i_delayed_blks > 0) {
1435 				error = filemap_flush(VFS_I(ip)->i_mapping);
1436 				if (error)
1437 					return error;
1438 			}
1439 		}
1440 	}
1441 
1442 	if (VFS_I(ip)->i_nlink == 0)
1443 		return 0;
1444 
1445 	/*
1446 	 * If we can't get the iolock just skip truncating the blocks past EOF
1447 	 * because we could deadlock with the mmap_lock otherwise. We'll get
1448 	 * another chance to drop them once the last reference to the inode is
1449 	 * dropped, so we'll never leak blocks permanently.
1450 	 */
1451 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
1452 		return 0;
1453 
1454 	if (xfs_can_free_eofblocks(ip, false)) {
1455 		/*
1456 		 * Check if the inode is being opened, written and closed
1457 		 * frequently and we have delayed allocation blocks outstanding
1458 		 * (e.g. streaming writes from the NFS server), truncating the
1459 		 * blocks past EOF will cause fragmentation to occur.
1460 		 *
1461 		 * In this case don't do the truncation, but we have to be
1462 		 * careful how we detect this case. Blocks beyond EOF show up as
1463 		 * i_delayed_blks even when the inode is clean, so we need to
1464 		 * truncate them away first before checking for a dirty release.
1465 		 * Hence on the first dirty close we will still remove the
1466 		 * speculative allocation, but after that we will leave it in
1467 		 * place.
1468 		 */
1469 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1470 			goto out_unlock;
1471 
1472 		error = xfs_free_eofblocks(ip);
1473 		if (error)
1474 			goto out_unlock;
1475 
1476 		/* delalloc blocks after truncation means it really is dirty */
1477 		if (ip->i_delayed_blks)
1478 			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1479 	}
1480 
1481 out_unlock:
1482 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1483 	return error;
1484 }
1485 
1486 /*
1487  * xfs_inactive_truncate
1488  *
1489  * Called to perform a truncate when an inode becomes unlinked.
1490  */
1491 STATIC int
xfs_inactive_truncate(struct xfs_inode * ip)1492 xfs_inactive_truncate(
1493 	struct xfs_inode *ip)
1494 {
1495 	struct xfs_mount	*mp = ip->i_mount;
1496 	struct xfs_trans	*tp;
1497 	int			error;
1498 
1499 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1500 	if (error) {
1501 		ASSERT(xfs_is_shutdown(mp));
1502 		return error;
1503 	}
1504 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1505 	xfs_trans_ijoin(tp, ip, 0);
1506 
1507 	/*
1508 	 * Log the inode size first to prevent stale data exposure in the event
1509 	 * of a system crash before the truncate completes. See the related
1510 	 * comment in xfs_vn_setattr_size() for details.
1511 	 */
1512 	ip->i_disk_size = 0;
1513 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1514 
1515 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1516 	if (error)
1517 		goto error_trans_cancel;
1518 
1519 	ASSERT(ip->i_df.if_nextents == 0);
1520 
1521 	error = xfs_trans_commit(tp);
1522 	if (error)
1523 		goto error_unlock;
1524 
1525 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1526 	return 0;
1527 
1528 error_trans_cancel:
1529 	xfs_trans_cancel(tp);
1530 error_unlock:
1531 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1532 	return error;
1533 }
1534 
1535 /*
1536  * xfs_inactive_ifree()
1537  *
1538  * Perform the inode free when an inode is unlinked.
1539  */
1540 STATIC int
xfs_inactive_ifree(struct xfs_inode * ip)1541 xfs_inactive_ifree(
1542 	struct xfs_inode *ip)
1543 {
1544 	struct xfs_mount	*mp = ip->i_mount;
1545 	struct xfs_trans	*tp;
1546 	int			error;
1547 
1548 	/*
1549 	 * We try to use a per-AG reservation for any block needed by the finobt
1550 	 * tree, but as the finobt feature predates the per-AG reservation
1551 	 * support a degraded file system might not have enough space for the
1552 	 * reservation at mount time.  In that case try to dip into the reserved
1553 	 * pool and pray.
1554 	 *
1555 	 * Send a warning if the reservation does happen to fail, as the inode
1556 	 * now remains allocated and sits on the unlinked list until the fs is
1557 	 * repaired.
1558 	 */
1559 	if (unlikely(mp->m_finobt_nores)) {
1560 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1561 				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1562 				&tp);
1563 	} else {
1564 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1565 	}
1566 	if (error) {
1567 		if (error == -ENOSPC) {
1568 			xfs_warn_ratelimited(mp,
1569 			"Failed to remove inode(s) from unlinked list. "
1570 			"Please free space, unmount and run xfs_repair.");
1571 		} else {
1572 			ASSERT(xfs_is_shutdown(mp));
1573 		}
1574 		return error;
1575 	}
1576 
1577 	/*
1578 	 * We do not hold the inode locked across the entire rolling transaction
1579 	 * here. We only need to hold it for the first transaction that
1580 	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1581 	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1582 	 * here breaks the relationship between cluster buffer invalidation and
1583 	 * stale inode invalidation on cluster buffer item journal commit
1584 	 * completion, and can result in leaving dirty stale inodes hanging
1585 	 * around in memory.
1586 	 *
1587 	 * We have no need for serialising this inode operation against other
1588 	 * operations - we freed the inode and hence reallocation is required
1589 	 * and that will serialise on reallocating the space the deferops need
1590 	 * to free. Hence we can unlock the inode on the first commit of
1591 	 * the transaction rather than roll it right through the deferops. This
1592 	 * avoids relogging the XFS_ISTALE inode.
1593 	 *
1594 	 * We check that xfs_ifree() hasn't grown an internal transaction roll
1595 	 * by asserting that the inode is still locked when it returns.
1596 	 */
1597 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1598 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1599 
1600 	error = xfs_ifree(tp, ip);
1601 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1602 	if (error) {
1603 		/*
1604 		 * If we fail to free the inode, shut down.  The cancel
1605 		 * might do that, we need to make sure.  Otherwise the
1606 		 * inode might be lost for a long time or forever.
1607 		 */
1608 		if (!xfs_is_shutdown(mp)) {
1609 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1610 				__func__, error);
1611 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1612 		}
1613 		xfs_trans_cancel(tp);
1614 		return error;
1615 	}
1616 
1617 	/*
1618 	 * Credit the quota account(s). The inode is gone.
1619 	 */
1620 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1621 
1622 	return xfs_trans_commit(tp);
1623 }
1624 
1625 /*
1626  * Returns true if we need to update the on-disk metadata before we can free
1627  * the memory used by this inode.  Updates include freeing post-eof
1628  * preallocations; freeing COW staging extents; and marking the inode free in
1629  * the inobt if it is on the unlinked list.
1630  */
1631 bool
xfs_inode_needs_inactive(struct xfs_inode * ip)1632 xfs_inode_needs_inactive(
1633 	struct xfs_inode	*ip)
1634 {
1635 	struct xfs_mount	*mp = ip->i_mount;
1636 	struct xfs_ifork	*cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
1637 
1638 	/*
1639 	 * If the inode is already free, then there can be nothing
1640 	 * to clean up here.
1641 	 */
1642 	if (VFS_I(ip)->i_mode == 0)
1643 		return false;
1644 
1645 	/*
1646 	 * If this is a read-only mount, don't do this (would generate I/O)
1647 	 * unless we're in log recovery and cleaning the iunlinked list.
1648 	 */
1649 	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1650 		return false;
1651 
1652 	/* If the log isn't running, push inodes straight to reclaim. */
1653 	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
1654 		return false;
1655 
1656 	/* Metadata inodes require explicit resource cleanup. */
1657 	if (xfs_is_metadata_inode(ip))
1658 		return false;
1659 
1660 	/* Want to clean out the cow blocks if there are any. */
1661 	if (cow_ifp && cow_ifp->if_bytes > 0)
1662 		return true;
1663 
1664 	/* Unlinked files must be freed. */
1665 	if (VFS_I(ip)->i_nlink == 0)
1666 		return true;
1667 
1668 	/*
1669 	 * This file isn't being freed, so check if there are post-eof blocks
1670 	 * to free.  @force is true because we are evicting an inode from the
1671 	 * cache.  Post-eof blocks must be freed, lest we end up with broken
1672 	 * free space accounting.
1673 	 *
1674 	 * Note: don't bother with iolock here since lockdep complains about
1675 	 * acquiring it in reclaim context. We have the only reference to the
1676 	 * inode at this point anyways.
1677 	 */
1678 	return xfs_can_free_eofblocks(ip, true);
1679 }
1680 
1681 /*
1682  * xfs_inactive
1683  *
1684  * This is called when the vnode reference count for the vnode
1685  * goes to zero.  If the file has been unlinked, then it must
1686  * now be truncated.  Also, we clear all of the read-ahead state
1687  * kept for the inode here since the file is now closed.
1688  */
1689 int
xfs_inactive(xfs_inode_t * ip)1690 xfs_inactive(
1691 	xfs_inode_t	*ip)
1692 {
1693 	struct xfs_mount	*mp;
1694 	int			error = 0;
1695 	int			truncate = 0;
1696 
1697 	/*
1698 	 * If the inode is already free, then there can be nothing
1699 	 * to clean up here.
1700 	 */
1701 	if (VFS_I(ip)->i_mode == 0) {
1702 		ASSERT(ip->i_df.if_broot_bytes == 0);
1703 		goto out;
1704 	}
1705 
1706 	mp = ip->i_mount;
1707 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1708 
1709 	/*
1710 	 * If this is a read-only mount, don't do this (would generate I/O)
1711 	 * unless we're in log recovery and cleaning the iunlinked list.
1712 	 */
1713 	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
1714 		goto out;
1715 
1716 	/* Metadata inodes require explicit resource cleanup. */
1717 	if (xfs_is_metadata_inode(ip))
1718 		goto out;
1719 
1720 	/* Try to clean out the cow blocks if there are any. */
1721 	if (xfs_inode_has_cow_data(ip))
1722 		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1723 
1724 	if (VFS_I(ip)->i_nlink != 0) {
1725 		/*
1726 		 * force is true because we are evicting an inode from the
1727 		 * cache. Post-eof blocks must be freed, lest we end up with
1728 		 * broken free space accounting.
1729 		 *
1730 		 * Note: don't bother with iolock here since lockdep complains
1731 		 * about acquiring it in reclaim context. We have the only
1732 		 * reference to the inode at this point anyways.
1733 		 */
1734 		if (xfs_can_free_eofblocks(ip, true))
1735 			error = xfs_free_eofblocks(ip);
1736 
1737 		goto out;
1738 	}
1739 
1740 	if (S_ISREG(VFS_I(ip)->i_mode) &&
1741 	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
1742 	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1743 		truncate = 1;
1744 
1745 	if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
1746 		/*
1747 		 * If this inode is being inactivated during a quotacheck and
1748 		 * has not yet been scanned by quotacheck, we /must/ remove
1749 		 * the dquots from the inode before inactivation changes the
1750 		 * block and inode counts.  Most probably this is a result of
1751 		 * reloading the incore iunlinked list to purge unrecovered
1752 		 * unlinked inodes.
1753 		 */
1754 		xfs_qm_dqdetach(ip);
1755 	} else {
1756 		error = xfs_qm_dqattach(ip);
1757 		if (error)
1758 			goto out;
1759 	}
1760 
1761 	if (S_ISLNK(VFS_I(ip)->i_mode))
1762 		error = xfs_inactive_symlink(ip);
1763 	else if (truncate)
1764 		error = xfs_inactive_truncate(ip);
1765 	if (error)
1766 		goto out;
1767 
1768 	/*
1769 	 * If there are attributes associated with the file then blow them away
1770 	 * now.  The code calls a routine that recursively deconstructs the
1771 	 * attribute fork. It also blows away the in-core attribute fork.
1772 	 */
1773 	if (xfs_inode_has_attr_fork(ip)) {
1774 		error = xfs_attr_inactive(ip);
1775 		if (error)
1776 			goto out;
1777 	}
1778 
1779 	ASSERT(ip->i_forkoff == 0);
1780 
1781 	/*
1782 	 * Free the inode.
1783 	 */
1784 	error = xfs_inactive_ifree(ip);
1785 
1786 out:
1787 	/*
1788 	 * We're done making metadata updates for this inode, so we can release
1789 	 * the attached dquots.
1790 	 */
1791 	xfs_qm_dqdetach(ip);
1792 	return error;
1793 }
1794 
1795 /*
1796  * In-Core Unlinked List Lookups
1797  * =============================
1798  *
1799  * Every inode is supposed to be reachable from some other piece of metadata
1800  * with the exception of the root directory.  Inodes with a connection to a
1801  * file descriptor but not linked from anywhere in the on-disk directory tree
1802  * are collectively known as unlinked inodes, though the filesystem itself
1803  * maintains links to these inodes so that on-disk metadata are consistent.
1804  *
1805  * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
1806  * header contains a number of buckets that point to an inode, and each inode
1807  * record has a pointer to the next inode in the hash chain.  This
1808  * singly-linked list causes scaling problems in the iunlink remove function
1809  * because we must walk that list to find the inode that points to the inode
1810  * being removed from the unlinked hash bucket list.
1811  *
1812  * Hence we keep an in-memory doubly linked list to link each inode on an
1813  * unlinked list. Because there are 64 unlinked lists per AGI, keeping pointer
1814  * based lists would require having 64 list heads in the perag, one for each
1815  * list. This is expensive in terms of memory (think millions of AGs) and cache
1816  * misses on lookups. Instead, we use the fact that inodes on the unlinked
1817  * list must be referenced at the VFS level to keep them on the list, which
1818  * gives us an existence guarantee for inodes on the unlinked list.
1819  *
1820  * Given we have an existence guarantee, we can use lockless inode cache lookups
1821  * to resolve aginos to xfs inodes. This means we only need 8 bytes per inode
1822  * for the doubly linked unlinked list, and we don't need any extra locking to
1823  * keep the list safe as all manipulations are done under the AGI buffer lock.
1824  * Keeping the list up to date does not require memory allocation, just finding
1825  * the XFS inode and updating the next/prev unlinked list aginos.
1826  */
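
/*
 * A minimal illustrative sketch (not compiled code): conceptually, each
 * incore inode carries two aginos that form the doubly linked list, so
 * removal becomes O(1) pointer surgery instead of an on-disk chain walk:
 *
 *	bucket  = agino % XFS_AGI_UNLINKED_BUCKETS;	// e.g. 130 % 64 == 2
 *	next_ip = xfs_iunlink_lookup(pag, ip->i_next_unlinked);
 *	prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
 *	... rewire prev_ip->i_next_unlinked and next_ip->i_prev_unlinked ...
 *
 * This mirrors what xfs_iunlink_insert_inode() and xfs_iunlink_remove_inode()
 * below do under the AGI buffer lock.
 */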
1827 
1828 /*
1829  * Find an inode on the unlinked list. This does not take references to the
1830  * inode: holding the AGI buffer lock guarantees existence, because only
1831  * unlinked, referenced inodes can be on the unlinked inode list.  If we
1832  * don't find the inode in cache, then let the caller handle the situation.
1833  */
1834 static struct xfs_inode *
1835 xfs_iunlink_lookup(
1836 	struct xfs_perag	*pag,
1837 	xfs_agino_t		agino)
1838 {
1839 	struct xfs_inode	*ip;
1840 
1841 	rcu_read_lock();
1842 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
1843 	if (!ip) {
1844 		/* Caller can handle inode not being in memory. */
1845 		rcu_read_unlock();
1846 		return NULL;
1847 	}
1848 
1849 	/*
1850 	 * Inode in RCU freeing limbo should not happen.  Warn about this and
1851 	 * let the caller handle the failure.
1852 	 */
1853 	if (WARN_ON_ONCE(!ip->i_ino)) {
1854 		rcu_read_unlock();
1855 		return NULL;
1856 	}
1857 	ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
1858 	rcu_read_unlock();
1859 	return ip;
1860 }
1861 
1862 /*
1863  * Update the prev pointer of the next agino.  Returns -ENOLINK if the inode
1864  * is not in cache.
1865  */
1866 static int
1867 xfs_iunlink_update_backref(
1868 	struct xfs_perag	*pag,
1869 	xfs_agino_t		prev_agino,
1870 	xfs_agino_t		next_agino)
1871 {
1872 	struct xfs_inode	*ip;
1873 
1874 	/* No update necessary if we are at the end of the list. */
1875 	if (next_agino == NULLAGINO)
1876 		return 0;
1877 
1878 	ip = xfs_iunlink_lookup(pag, next_agino);
1879 	if (!ip)
1880 		return -ENOLINK;
1881 
1882 	ip->i_prev_unlinked = prev_agino;
1883 	return 0;
1884 }
1885 
1886 /*
1887  * Point the AGI unlinked bucket at an inode and log the results.  The caller
1888  * is responsible for validating the old value.
1889  */
1890 STATIC int
1891 xfs_iunlink_update_bucket(
1892 	struct xfs_trans	*tp,
1893 	struct xfs_perag	*pag,
1894 	struct xfs_buf		*agibp,
1895 	unsigned int		bucket_index,
1896 	xfs_agino_t		new_agino)
1897 {
1898 	struct xfs_agi		*agi = agibp->b_addr;
1899 	xfs_agino_t		old_value;
1900 	int			offset;
1901 
1902 	ASSERT(xfs_verify_agino_or_null(pag, new_agino));
1903 
1904 	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1905 	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
1906 			old_value, new_agino);
1907 
1908 	/*
1909 	 * We should never find the head of the list already set to the value
1910 	 * passed in because either we're adding or removing ourselves from the
1911 	 * head of the list.
1912 	 */
1913 	if (old_value == new_agino) {
1914 		xfs_buf_mark_corrupt(agibp);
1915 		return -EFSCORRUPTED;
1916 	}
1917 
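	/* Update the bucket slot, then log only those 32 bits of the AGI. */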
1918 	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
1919 	offset = offsetof(struct xfs_agi, agi_unlinked) +
1920 			(sizeof(xfs_agino_t) * bucket_index);
1921 	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
1922 	return 0;
1923 }
1924 
1925 /*
1926  * Load the inode @next_agino into the cache and set its prev_unlinked pointer
1927  * to @prev_agino.  Caller must hold the AGI to synchronize with other changes
1928  * to the unlinked list.
1929  */
1930 STATIC int
1931 xfs_iunlink_reload_next(
1932 	struct xfs_trans	*tp,
1933 	struct xfs_buf		*agibp,
1934 	xfs_agino_t		prev_agino,
1935 	xfs_agino_t		next_agino)
1936 {
1937 	struct xfs_perag	*pag = agibp->b_pag;
1938 	struct xfs_mount	*mp = pag->pag_mount;
1939 	struct xfs_inode	*next_ip = NULL;
1940 	xfs_ino_t		ino;
1941 	int			error;
1942 
1943 	ASSERT(next_agino != NULLAGINO);
1944 
1945 #ifdef DEBUG
1946 	rcu_read_lock();
1947 	next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
1948 	ASSERT(next_ip == NULL);
1949 	rcu_read_unlock();
1950 #endif
1951 
1952 	xfs_info_ratelimited(mp,
1953  "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating recovery.",
1954 			next_agino, pag->pag_agno);
1955 
1956 	/*
1957 	 * Use an untrusted lookup just to be cautious in case the AGI has been
1958 	 * corrupted and now points at a free inode.  That shouldn't happen,
1959 	 * but we'd rather shut down now since we're already running in a weird
1960 	 * situation.
1961 	 */
1962 	ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino);
1963 	error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip);
1964 	if (error)
1965 		return error;
1966 
1967 	/* If this is not an unlinked inode, something is very wrong. */
1968 	if (VFS_I(next_ip)->i_nlink != 0) {
1969 		error = -EFSCORRUPTED;
1970 		goto rele;
1971 	}
1972 
1973 	next_ip->i_prev_unlinked = prev_agino;
1974 	trace_xfs_iunlink_reload_next(next_ip);
1975 rele:
1976 	ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE));
1977 	if (xfs_is_quotacheck_running(mp) && next_ip)
1978 		xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED);
1979 	xfs_irele(next_ip);
1980 	return error;
1981 }
1982 
1983 static int
1984 xfs_iunlink_insert_inode(
1985 	struct xfs_trans	*tp,
1986 	struct xfs_perag	*pag,
1987 	struct xfs_buf		*agibp,
1988 	struct xfs_inode	*ip)
1989 {
1990 	struct xfs_mount	*mp = tp->t_mountp;
1991 	struct xfs_agi		*agi = agibp->b_addr;
1992 	xfs_agino_t		next_agino;
1993 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1994 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1995 	int			error;
1996 
1997 	/*
1998 	 * Get the index into the agi hash table for the list this inode will
1999 	 * go on.  Make sure the pointer isn't garbage and that this inode
2000 	 * isn't already on the list.
2001 	 */
2002 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2003 	if (next_agino == agino ||
2004 	    !xfs_verify_agino_or_null(pag, next_agino)) {
2005 		xfs_buf_mark_corrupt(agibp);
2006 		return -EFSCORRUPTED;
2007 	}
2008 
2009 	/*
2010 	 * Update the prev pointer in the next inode to point back to this
2011 	 * inode.
2012 	 */
2013 	error = xfs_iunlink_update_backref(pag, agino, next_agino);
2014 	if (error == -ENOLINK)
2015 		error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
2016 	if (error)
2017 		return error;
2018 
2019 	if (next_agino != NULLAGINO) {
2020 		/*
2021 		 * There is already another inode in the bucket, so point this
2022 		 * inode to the current head of the list.
2023 		 */
2024 		error = xfs_iunlink_log_inode(tp, ip, pag, next_agino);
2025 		if (error)
2026 			return error;
2027 		ip->i_next_unlinked = next_agino;
2028 	}
2029 
2030 	/* Point the head of the list to point to this inode. */
2031 	ip->i_prev_unlinked = NULLAGINO;
2032 	return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
2033 }
2034 
2035 /*
2036  * This is called when the inode's link count has gone to 0 or we are creating
2037  * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
2038  *
2039  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
2040  * list when the inode is freed.
2041  */
2042 STATIC int
2043 xfs_iunlink(
2044 	struct xfs_trans	*tp,
2045 	struct xfs_inode	*ip)
2046 {
2047 	struct xfs_mount	*mp = tp->t_mountp;
2048 	struct xfs_perag	*pag;
2049 	struct xfs_buf		*agibp;
2050 	int			error;
2051 
2052 	ASSERT(VFS_I(ip)->i_nlink == 0);
2053 	ASSERT(VFS_I(ip)->i_mode != 0);
2054 	trace_xfs_iunlink(ip);
2055 
2056 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2057 
2058 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2059 	error = xfs_read_agi(pag, tp, &agibp);
2060 	if (error)
2061 		goto out;
2062 
2063 	error = xfs_iunlink_insert_inode(tp, pag, agibp, ip);
2064 out:
2065 	xfs_perag_put(pag);
2066 	return error;
2067 }
2068 
2069 static int
2070 xfs_iunlink_remove_inode(
2071 	struct xfs_trans	*tp,
2072 	struct xfs_perag	*pag,
2073 	struct xfs_buf		*agibp,
2074 	struct xfs_inode	*ip)
2075 {
2076 	struct xfs_mount	*mp = tp->t_mountp;
2077 	struct xfs_agi		*agi = agibp->b_addr;
2078 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2079 	xfs_agino_t		head_agino;
2080 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2081 	int			error;
2082 
2083 	trace_xfs_iunlink_remove(ip);
2084 
2085 	/*
2086 	 * Get the index into the agi hash table for the list this inode will
2087 	 * go on.  Make sure the head pointer isn't garbage.
2088 	 */
2089 	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2090 	if (!xfs_verify_agino(pag, head_agino)) {
2091 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2092 				agi, sizeof(*agi));
2093 		return -EFSCORRUPTED;
2094 	}
2095 
2096 	/*
2097 	 * Set our inode's next_unlinked pointer to NULLAGINO on disk, then use
2098 	 * the cached pointer values to update whatever was previous to us in
2099 	 * the list to point to whatever was next in the list.
2100 	 */
2101 	error = xfs_iunlink_log_inode(tp, ip, pag, NULLAGINO);
2102 	if (error)
2103 		return error;
2104 
2105 	/*
2106 	 * Update the prev pointer in the next inode to point back to previous
2107 	 * inode in the chain.
2108 	 */
2109 	error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
2110 			ip->i_next_unlinked);
2111 	if (error == -ENOLINK)
2112 		error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
2113 				ip->i_next_unlinked);
2114 	if (error)
2115 		return error;
2116 
2117 	if (head_agino != agino) {
2118 		struct xfs_inode	*prev_ip;
2119 
2120 		prev_ip = xfs_iunlink_lookup(pag, ip->i_prev_unlinked);
2121 		if (!prev_ip)
2122 			return -EFSCORRUPTED;
2123 
2124 		error = xfs_iunlink_log_inode(tp, prev_ip, pag,
2125 				ip->i_next_unlinked);
2126 		prev_ip->i_next_unlinked = ip->i_next_unlinked;
2127 	} else {
2128 		/* Point the head of the list to the next unlinked inode. */
2129 		error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
2130 				ip->i_next_unlinked);
2131 	}
2132 
2133 	ip->i_next_unlinked = NULLAGINO;
2134 	ip->i_prev_unlinked = 0;
2135 	return error;
2136 }
2137 
2138 /*
2139  * Pull the on-disk inode from the AGI unlinked list.
2140  */
2141 STATIC int
2142 xfs_iunlink_remove(
2143 	struct xfs_trans	*tp,
2144 	struct xfs_perag	*pag,
2145 	struct xfs_inode	*ip)
2146 {
2147 	struct xfs_buf		*agibp;
2148 	int			error;
2149 
2150 	trace_xfs_iunlink_remove(ip);
2151 
2152 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2153 	error = xfs_read_agi(pag, tp, &agibp);
2154 	if (error)
2155 		return error;
2156 
2157 	return xfs_iunlink_remove_inode(tp, pag, agibp, ip);
2158 }
2159 
2160 /*
2161  * Look up the specified inode number and, if it is not already marked
2162  * XFS_ISTALE, mark it stale. We should only find clean inodes in this
2163  * lookup that aren't already stale.
2164  */
2165 static void
2166 xfs_ifree_mark_inode_stale(
2167 	struct xfs_perag	*pag,
2168 	struct xfs_inode	*free_ip,
2169 	xfs_ino_t		inum)
2170 {
2171 	struct xfs_mount	*mp = pag->pag_mount;
2172 	struct xfs_inode_log_item *iip;
2173 	struct xfs_inode	*ip;
2174 
2175 retry:
2176 	rcu_read_lock();
2177 	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2178 
2179 	/* Inode not in memory, nothing to do */
2180 	if (!ip) {
2181 		rcu_read_unlock();
2182 		return;
2183 	}
2184 
2185 	/*
2186 	 * Because this is an RCU protected lookup, we could find a recently
2187 	 * freed or even reallocated inode during the lookup. We need to check
2188 	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2189 	 * valid, is the wrong inode, or is stale.
2190 	 */
2191 	spin_lock(&ip->i_flags_lock);
2192 	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2193 		goto out_iflags_unlock;
2194 
2195 	/*
2196 	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2197 	 * other inodes that we did not find attached to the buffer's list and
2198 	 * that are not already marked stale. If we can't lock it, back off and
2199 	 * retry.
2200 	 */
2201 	if (ip != free_ip) {
2202 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2203 			spin_unlock(&ip->i_flags_lock);
2204 			rcu_read_unlock();
2205 			delay(1);
2206 			goto retry;
2207 		}
2208 	}
2209 	ip->i_flags |= XFS_ISTALE;
2210 
2211 	/*
2212 	 * If the inode is flushing, it is already attached to the buffer.  All
2213 	 * we need to do here is mark the inode stale so buffer IO completion
2214 	 * will remove it from the AIL.
2215 	 */
2216 	iip = ip->i_itemp;
2217 	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2218 		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2219 		ASSERT(iip->ili_last_fields);
2220 		goto out_iunlock;
2221 	}
2222 
2223 	/*
2224 	 * Inodes not attached to the buffer can be released immediately.
2225 	 * Everything else has to go through xfs_iflush_abort() on journal
2226 	 * commit as the flock synchronises removal of the inode from the
2227 	 * cluster buffer against inode reclaim.
2228 	 */
2229 	if (!iip || list_empty(&iip->ili_item.li_bio_list))
2230 		goto out_iunlock;
2231 
2232 	__xfs_iflags_set(ip, XFS_IFLUSHING);
2233 	spin_unlock(&ip->i_flags_lock);
2234 	rcu_read_unlock();
2235 
2236 	/* we have a dirty inode in memory that has not yet been flushed. */
2237 	spin_lock(&iip->ili_lock);
2238 	iip->ili_last_fields = iip->ili_fields;
2239 	iip->ili_fields = 0;
2240 	iip->ili_fsync_fields = 0;
2241 	spin_unlock(&iip->ili_lock);
2242 	ASSERT(iip->ili_last_fields);
2243 
2244 	if (ip != free_ip)
2245 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2246 	return;
2247 
2248 out_iunlock:
2249 	if (ip != free_ip)
2250 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2251 out_iflags_unlock:
2252 	spin_unlock(&ip->i_flags_lock);
2253 	rcu_read_unlock();
2254 }
2255 
2256 /*
2257  * A big issue when freeing the inode cluster is that we _cannot_ skip any
2258  * inodes that are in memory - they all must be marked stale and attached to
2259  * the cluster buffer.
2260  */
2261 static int
2262 xfs_ifree_cluster(
2263 	struct xfs_trans	*tp,
2264 	struct xfs_perag	*pag,
2265 	struct xfs_inode	*free_ip,
2266 	struct xfs_icluster	*xic)
2267 {
2268 	struct xfs_mount	*mp = free_ip->i_mount;
2269 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2270 	struct xfs_buf		*bp;
2271 	xfs_daddr_t		blkno;
2272 	xfs_ino_t		inum = xic->first_ino;
2273 	int			nbufs;
2274 	int			i, j;
2275 	int			ioffset;
2276 	int			error;
2277 
2278 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2279 
2280 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2281 		/*
2282 		 * The allocation bitmap tells us which inodes of the chunk were
2283 		 * physically allocated. Skip the cluster if an inode falls into
2284 		 * a sparse region.
2285 		 */
2286 		ioffset = inum - xic->first_ino;
2287 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2288 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2289 			continue;
2290 		}
2291 
2292 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2293 					 XFS_INO_TO_AGBNO(mp, inum));
2294 
2295 		/*
2296 		 * We obtain and lock the backing buffer first in the process
2297 		 * here to ensure dirty inodes attached to the buffer remain in
2298 		 * the flushing state while we mark them stale.
2299 		 *
2300 		 * If we scan the in-memory inodes first, then buffer IO can
2301 		 * complete before we get a lock on it, and hence we may fail
2302 		 * to mark all the active inodes on the buffer stale.
2303 		 */
2304 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2305 				mp->m_bsize * igeo->blocks_per_cluster,
2306 				XBF_UNMAPPED, &bp);
2307 		if (error)
2308 			return error;
2309 
2310 		/*
2311 		 * This buffer may not have been correctly initialised as we
2312 		 * didn't read it from disk. That's not important because we are
2313 		 * only using it to mark the buffer as stale in the log, and to
2314 		 * attach stale cached inodes to it. That means it will never be
2315 		 * dispatched for IO. If it is, we want to know about it, and we
2316 		 * want it to fail. We can achieve this by adding a write
2317 		 * verifier to the buffer.
2318 		 */
2319 		bp->b_ops = &xfs_inode_buf_ops;
2320 
2321 		/*
2322 		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2323 		 * too. This requires lookups, and will skip inodes that we've
2324 		 * already marked XFS_ISTALE.
2325 		 */
2326 		for (i = 0; i < igeo->inodes_per_cluster; i++)
2327 			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2328 
2329 		xfs_trans_stale_inode_buf(tp, bp);
2330 		xfs_trans_binval(tp, bp);
2331 	}
2332 	return 0;
2333 }
2334 
2335 /*
2336  * This is called to return an inode to the inode free list.  The inode should
2337  * already be truncated to 0 length and have no pages associated with it.  This
2338  * routine also assumes that the inode is already a part of the transaction.
2339  *
2340  * The on-disk copy of the inode will have been added to the list of unlinked
2341  * inodes in the AGI. We need to remove the inode from that list atomically with
2342  * respect to freeing it here.
2343  */
2344 int
2345 xfs_ifree(
2346 	struct xfs_trans	*tp,
2347 	struct xfs_inode	*ip)
2348 {
2349 	struct xfs_mount	*mp = ip->i_mount;
2350 	struct xfs_perag	*pag;
2351 	struct xfs_icluster	xic = { 0 };
2352 	struct xfs_inode_log_item *iip = ip->i_itemp;
2353 	int			error;
2354 
2355 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2356 	ASSERT(VFS_I(ip)->i_nlink == 0);
2357 	ASSERT(ip->i_df.if_nextents == 0);
2358 	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2359 	ASSERT(ip->i_nblocks == 0);
2360 
2361 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2362 
2363 	/*
2364 	 * Free the inode first so that we guarantee that the AGI lock is going
2365 	 * to be taken before we remove the inode from the unlinked list. This
2366 	 * makes the AGI lock -> unlinked list modification order the same as
2367 	 * used in O_TMPFILE creation.
2368 	 */
2369 	error = xfs_difree(tp, pag, ip->i_ino, &xic);
2370 	if (error)
2371 		goto out;
2372 
2373 	error = xfs_iunlink_remove(tp, pag, ip);
2374 	if (error)
2375 		goto out;
2376 
2377 	/*
2378 	 * Free any local-format data sitting around before we reset the
2379 	 * data fork to extents format.  Note that the attr fork data has
2380 	 * already been freed by xfs_attr_inactive.
2381 	 */
2382 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2383 		kmem_free(ip->i_df.if_u1.if_data);
2384 		ip->i_df.if_u1.if_data = NULL;
2385 		ip->i_df.if_bytes = 0;
2386 	}
2387 
2388 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2389 	ip->i_diflags = 0;
2390 	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2391 	ip->i_forkoff = 0;		/* mark the attr fork not in use */
2392 	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2393 	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2394 		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2395 
2396 	/* Don't attempt to replay owner changes for a deleted inode */
2397 	spin_lock(&iip->ili_lock);
2398 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2399 	spin_unlock(&iip->ili_lock);
2400 
2401 	/*
2402 	 * Bump the generation count so no one will be confused
2403 	 * by reincarnations of this inode.
2404 	 */
2405 	VFS_I(ip)->i_generation++;
2406 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2407 
2408 	if (xic.deleted)
2409 		error = xfs_ifree_cluster(tp, pag, ip, &xic);
2410 out:
2411 	xfs_perag_put(pag);
2412 	return error;
2413 }
2414 
2415 /*
2416  * This is called to unpin an inode.  The caller must have the inode locked
2417  * in at least shared mode so that the inode cannot be subsequently pinned
2418  * once someone is waiting for it to be unpinned.
2419  */
2420 static void
2421 xfs_iunpin(
2422 	struct xfs_inode	*ip)
2423 {
2424 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2425 
2426 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2427 
2428 	/* Give the log a push to start the unpinning I/O */
2429 	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2430 
2431 }
2432 
2433 static void
2434 __xfs_iunpin_wait(
2435 	struct xfs_inode	*ip)
2436 {
2437 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2438 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2439 
2440 	xfs_iunpin(ip);
2441 
2442 	do {
2443 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2444 		if (xfs_ipincount(ip))
2445 			io_schedule();
2446 	} while (xfs_ipincount(ip));
2447 	finish_wait(wq, &wait.wq_entry);
2448 }
2449 
2450 void
2451 xfs_iunpin_wait(
2452 	struct xfs_inode	*ip)
2453 {
2454 	if (xfs_ipincount(ip))
2455 		__xfs_iunpin_wait(ip);
2456 }
2457 
2458 /*
2459  * Removing an inode from the namespace involves removing the directory entry
2460  * and dropping the link count on the inode. Removing the directory entry can
2461  * result in locking an AGF (directory blocks were freed) and removing a link
2462  * count can result in placing the inode on an unlinked list which results in
2463  * locking an AGI.
2464  *
2465  * The big problem here is that we have an ordering constraint on AGF and AGI
2466  * locking - inode allocation locks the AGI, then can allocate a new extent for
2467  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2468  * removes the inode from the unlinked list, requiring that we lock the AGI
2469  * first, and then freeing the inode can result in an inode chunk being freed
2470  * and hence freeing disk space requiring that we lock an AGF.
2471  *
2472  * Hence the ordering that is imposed by other parts of the code is AGI before
2473  * AGF. This means we cannot remove the directory entry before we drop the inode
2474  * reference count and put it on the unlinked list as this results in a lock
2475  * order of AGF then AGI, and this can deadlock against inode allocation and
2476  * freeing. Therefore we must drop the link counts before we remove the
2477  * directory entry.
2478  *
2479  * This is still safe from a transactional point of view - it is not until we
2480  * get to xfs_defer_finish() that we have the possibility of multiple
2481  * transactions in this operation. Hence as long as we remove the directory
2482  * entry and drop the link count in the first transaction of the remove
2483  * operation, there are no transactional constraints on the ordering here.
2484  */
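
/*
 * A hedged sketch of the resulting call order in this function (illustrative
 * only; error handling elided):
 *
 *	xfs_droplink(tp, ip);		// may lock the AGI (unlinked list)
 *	xfs_dir_removename(tp, ...);	// may lock the AGF (blocks freed)
 *
 * The link count drop comes first so any AGI lock is always taken before any
 * AGF lock the directory code may need.
 */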
2485 int
2486 xfs_remove(
2487 	xfs_inode_t             *dp,
2488 	struct xfs_name		*name,
2489 	xfs_inode_t		*ip)
2490 {
2491 	xfs_mount_t		*mp = dp->i_mount;
2492 	xfs_trans_t             *tp = NULL;
2493 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2494 	int			dontcare;
2495 	int                     error = 0;
2496 	uint			resblks;
2497 
2498 	trace_xfs_remove(dp, name);
2499 
2500 	if (xfs_is_shutdown(mp))
2501 		return -EIO;
2502 
2503 	error = xfs_qm_dqattach(dp);
2504 	if (error)
2505 		goto std_return;
2506 
2507 	error = xfs_qm_dqattach(ip);
2508 	if (error)
2509 		goto std_return;
2510 
2511 	/*
2512 	 * We try to get the real space reservation first, allowing for
2513 	 * directory btree deletion(s) implying possible bmap insert(s).  If we
2514 	 * can't get the space reservation then we use 0 instead, and avoid the
2515 	 * bmap btree insert(s) in the directory code by, if the bmap insert
2516 	 * tries to happen, instead trimming the LAST block from the directory.
2517 	 *
2518 	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2519 	 * the directory code can handle a reservationless update and we don't
2520 	 * want to prevent a user from trying to free space by deleting things.
2521 	 */
2522 	resblks = XFS_REMOVE_SPACE_RES(mp);
2523 	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2524 			&tp, &dontcare);
2525 	if (error) {
2526 		ASSERT(error != -ENOSPC);
2527 		goto std_return;
2528 	}
2529 
2530 	/*
2531 	 * If we're removing a directory perform some additional validation.
2532 	 */
2533 	if (is_dir) {
2534 		ASSERT(VFS_I(ip)->i_nlink >= 2);
2535 		if (VFS_I(ip)->i_nlink != 2) {
2536 			error = -ENOTEMPTY;
2537 			goto out_trans_cancel;
2538 		}
2539 		if (!xfs_dir_isempty(ip)) {
2540 			error = -ENOTEMPTY;
2541 			goto out_trans_cancel;
2542 		}
2543 
2544 		/* Drop the link from ip's "..".  */
2545 		error = xfs_droplink(tp, dp);
2546 		if (error)
2547 			goto out_trans_cancel;
2548 
2549 		/* Drop the "." link from ip to self.  */
2550 		error = xfs_droplink(tp, ip);
2551 		if (error)
2552 			goto out_trans_cancel;
2553 
2554 		/*
2555 		 * Point the unlinked child directory's ".." entry to the root
2556 		 * directory to eliminate back-references to inodes that may
2557 		 * get freed before the child directory is closed.  If the fs
2558 		 * gets shrunk, this can lead to dirent inode validation errors.
2559 		 */
2560 		if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2561 			error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2562 					tp->t_mountp->m_sb.sb_rootino, 0);
2563 			if (error)
2564 				goto out_trans_cancel;
2565 		}
2566 	} else {
2567 		/*
2568 		 * When removing a non-directory we need to log the parent
2569 		 * inode here.  For a directory this is done implicitly
2570 		 * by the xfs_droplink call for the ".." entry.
2571 		 */
2572 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2573 	}
2574 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2575 
2576 	/* Drop the link from dp to ip. */
2577 	error = xfs_droplink(tp, ip);
2578 	if (error)
2579 		goto out_trans_cancel;
2580 
2581 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2582 	if (error) {
2583 		ASSERT(error != -ENOENT);
2584 		goto out_trans_cancel;
2585 	}
2586 
2587 	/*
2588 	 * If this is a synchronous mount, make sure that the
2589 	 * remove transaction goes to disk before returning to
2590 	 * the user.
2591 	 */
2592 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2593 		xfs_trans_set_sync(tp);
2594 
2595 	error = xfs_trans_commit(tp);
2596 	if (error)
2597 		goto std_return;
2598 
2599 	if (is_dir && xfs_inode_is_filestream(ip))
2600 		xfs_filestream_deassociate(ip);
2601 
2602 	return 0;
2603 
2604  out_trans_cancel:
2605 	xfs_trans_cancel(tp);
2606  std_return:
2607 	return error;
2608 }
2609 
2610 /*
2611  * Enter all inodes for a rename transaction into a sorted array.
2612  */
2613 #define __XFS_SORT_INODES	5
2614 STATIC void
2615 xfs_sort_for_rename(
2616 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2617 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2618 	struct xfs_inode	*ip1,	/* in: inode of old entry */
2619 	struct xfs_inode	*ip2,	/* in: inode of new entry */
2620 	struct xfs_inode	*wip,	/* in: whiteout inode */
2621 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2622 	int			*num_inodes)  /* in/out: inodes in array */
2623 {
2624 	int			i, j;
2625 
2626 	ASSERT(*num_inodes == __XFS_SORT_INODES);
2627 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2628 
2629 	/*
2630 	 * i_tab contains a list of pointers to inodes.  We initialize
2631 	 * the table here and sort it.  We will then use it to
2632 	 * order the acquisition of the inode locks.
2633 	 *
2634 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2635 	 */
2636 	i = 0;
2637 	i_tab[i++] = dp1;
2638 	i_tab[i++] = dp2;
2639 	i_tab[i++] = ip1;
2640 	if (ip2)
2641 		i_tab[i++] = ip2;
2642 	if (wip)
2643 		i_tab[i++] = wip;
2644 	*num_inodes = i;
2645 
2646 	/*
2647 	 * Sort the elements via bubble sort.  (Remember, there are at
2648 	 * most 5 elements to sort, so this is adequate.)
2649 	 */
2650 	for (i = 0; i < *num_inodes; i++) {
2651 		for (j = 1; j < *num_inodes; j++) {
2652 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2653 				struct xfs_inode *temp = i_tab[j];
2654 				i_tab[j] = i_tab[j-1];
2655 				i_tab[j-1] = temp;
2656 			}
2657 		}
2658 	}
2659 }
2660 
2661 static int
2662 xfs_finish_rename(
2663 	struct xfs_trans	*tp)
2664 {
2665 	/*
2666 	 * If this is a synchronous mount, make sure that the rename transaction
2667 	 * goes to disk before returning to the user.
2668 	 */
2669 	if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2670 		xfs_trans_set_sync(tp);
2671 
2672 	return xfs_trans_commit(tp);
2673 }
2674 
2675 /*
2676  * xfs_cross_rename()
2677  *
2678  * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall
2679  */
2680 STATIC int
2681 xfs_cross_rename(
2682 	struct xfs_trans	*tp,
2683 	struct xfs_inode	*dp1,
2684 	struct xfs_name		*name1,
2685 	struct xfs_inode	*ip1,
2686 	struct xfs_inode	*dp2,
2687 	struct xfs_name		*name2,
2688 	struct xfs_inode	*ip2,
2689 	int			spaceres)
2690 {
2691 	int		error = 0;
2692 	int		ip1_flags = 0;
2693 	int		ip2_flags = 0;
2694 	int		dp2_flags = 0;
2695 
2696 	/* Swap inode number for dirent in first parent */
2697 	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2698 	if (error)
2699 		goto out_trans_abort;
2700 
2701 	/* Swap inode number for dirent in second parent */
2702 	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2703 	if (error)
2704 		goto out_trans_abort;
2705 
2706 	/*
2707 	 * If we're renaming one or more directories across different parents,
2708 	 * update the respective ".." entries (and link counts) to match the new
2709 	 * parents.
2710 	 */
2711 	if (dp1 != dp2) {
2712 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2713 
2714 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2715 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2716 						dp1->i_ino, spaceres);
2717 			if (error)
2718 				goto out_trans_abort;
2719 
2720 			/* transfer ip2 ".." reference to dp1 */
2721 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2722 				error = xfs_droplink(tp, dp2);
2723 				if (error)
2724 					goto out_trans_abort;
2725 				xfs_bumplink(tp, dp1);
2726 			}
2727 
2728 			/*
2729 			 * Although ip1 isn't changed here, userspace needs
2730 			 * to be notified about the change, so that applications
2731 			 * relying on it (like backup programs) will properly
2732 			 * detect the change.
2733 			 */
2734 			ip1_flags |= XFS_ICHGTIME_CHG;
2735 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2736 		}
2737 
2738 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2739 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2740 						dp2->i_ino, spaceres);
2741 			if (error)
2742 				goto out_trans_abort;
2743 
2744 			/* transfer ip1 ".." reference to dp2 */
2745 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
2746 				error = xfs_droplink(tp, dp1);
2747 				if (error)
2748 					goto out_trans_abort;
2749 				xfs_bumplink(tp, dp2);
2750 			}
2751 
2752 			/*
2753 			 * Although ip2 isn't changed here, userspace needs
2754 			 * to be notified about the change, so that applications
2755 			 * relying on it (like backup programs) will properly
2756 			 * detect the change.
2757 			 */
2758 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2759 			ip2_flags |= XFS_ICHGTIME_CHG;
2760 		}
2761 	}
2762 
2763 	if (ip1_flags) {
2764 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
2765 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2766 	}
2767 	if (ip2_flags) {
2768 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
2769 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2770 	}
2771 	if (dp2_flags) {
2772 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
2773 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2774 	}
2775 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2776 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2777 	return xfs_finish_rename(tp);
2778 
2779 out_trans_abort:
2780 	xfs_trans_cancel(tp);
2781 	return error;
2782 }
2783 
2784 /*
2785  * xfs_rename_alloc_whiteout()
2786  *
2787  * Return a referenced, unlinked, unlocked inode that can be used as a
2788  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2789  * crash between allocating the inode and linking it into the rename transaction,
2790  * recovery will free the inode and we won't leak it.
2791  */
2792 static int
2793 xfs_rename_alloc_whiteout(
2794 	struct mnt_idmap	*idmap,
2795 	struct xfs_name		*src_name,
2796 	struct xfs_inode	*dp,
2797 	struct xfs_inode	**wip)
2798 {
2799 	struct xfs_inode	*tmpfile;
2800 	struct qstr		name;
2801 	int			error;
2802 
2803 	error = xfs_create_tmpfile(idmap, dp, S_IFCHR | WHITEOUT_MODE,
2804 				   &tmpfile);
2805 	if (error)
2806 		return error;
2807 
2808 	name.name = src_name->name;
2809 	name.len = src_name->len;
2810 	error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
2811 	if (error) {
2812 		xfs_finish_inode_setup(tmpfile);
2813 		xfs_irele(tmpfile);
2814 		return error;
2815 	}
2816 
2817 	/*
2818 	 * Prepare the tmpfile inode as if it were created through the VFS.
2819 	 * Complete the inode setup and flag it as linkable.  nlink is already
2820 	 * zero, so we can skip the drop_nlink.
2821 	 */
2822 	xfs_setup_iops(tmpfile);
2823 	xfs_finish_inode_setup(tmpfile);
2824 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
2825 
2826 	*wip = tmpfile;
2827 	return 0;
2828 }
2829 
2830 /*
2831  * xfs_rename
2832  */
2833 int
2834 xfs_rename(
2835 	struct mnt_idmap	*idmap,
2836 	struct xfs_inode	*src_dp,
2837 	struct xfs_name		*src_name,
2838 	struct xfs_inode	*src_ip,
2839 	struct xfs_inode	*target_dp,
2840 	struct xfs_name		*target_name,
2841 	struct xfs_inode	*target_ip,
2842 	unsigned int		flags)
2843 {
2844 	struct xfs_mount	*mp = src_dp->i_mount;
2845 	struct xfs_trans	*tp;
2846 	struct xfs_inode	*wip = NULL;		/* whiteout inode */
2847 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
2848 	int			i;
2849 	int			num_inodes = __XFS_SORT_INODES;
2850 	bool			new_parent = (src_dp != target_dp);
2851 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2852 	int			spaceres;
2853 	bool			retried = false;
2854 	int			error, nospace_error = 0;
2855 
2856 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2857 
2858 	if ((flags & RENAME_EXCHANGE) && !target_ip)
2859 		return -EINVAL;
2860 
2861 	/*
2862 	 * If we are doing a whiteout operation, allocate the whiteout inode
2863 	 * we will be placing at the target and ensure the type is set
2864 	 * appropriately.
2865 	 */
2866 	if (flags & RENAME_WHITEOUT) {
2867 		error = xfs_rename_alloc_whiteout(idmap, src_name,
2868 						  target_dp, &wip);
2869 		if (error)
2870 			return error;
2871 
2872 		/* setup target dirent info as whiteout */
2873 		src_name->type = XFS_DIR3_FT_CHRDEV;
2874 	}
2875 
2876 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
2877 				inodes, &num_inodes);
2878 
2879 retry:
2880 	nospace_error = 0;
2881 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
2882 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2883 	if (error == -ENOSPC) {
2884 		nospace_error = error;
2885 		spaceres = 0;
2886 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2887 				&tp);
2888 	}
2889 	if (error)
2890 		goto out_release_wip;
2891 
2892 	/*
2893 	 * Attach the dquots to the inodes
2894 	 */
2895 	error = xfs_qm_vop_rename_dqattach(inodes);
2896 	if (error)
2897 		goto out_trans_cancel;
2898 
2899 	/*
2900 	 * Lock all the participating inodes. Depending upon whether
2901 	 * the target_name exists in the target directory, and
2902 	 * whether the target directory is the same as the source
2903 	 * directory, we can lock from 2 to 5 inodes.
2904 	 */
2905 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2906 
2907 	/*
2908 	 * Join all the inodes to the transaction. From this point on,
2909 	 * we can rely on either trans_commit or trans_cancel to unlock
2910 	 * them.
2911 	 */
2912 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
2913 	if (new_parent)
2914 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
2915 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2916 	if (target_ip)
2917 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
2918 	if (wip)
2919 		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
2920 
2921 	/*
2922 	 * If we are using project inheritance, we only allow renames
2923 	 * into our tree when the project IDs are the same; else the
2924 	 * tree quota mechanism would be circumvented.
2925 	 */
2926 	if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
2927 		     target_dp->i_projid != src_ip->i_projid)) {
2928 		error = -EXDEV;
2929 		goto out_trans_cancel;
2930 	}
2931 
2932 	/* RENAME_EXCHANGE is unique from here on. */
2933 	if (flags & RENAME_EXCHANGE)
2934 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
2935 					target_dp, target_name, target_ip,
2936 					spaceres);
2937 
2938 	/*
2939 	 * Try to reserve quota to handle an expansion of the target directory.
2940 	 * We'll allow the rename to continue in reservationless mode if we hit
2941 	 * a space usage constraint.  If we trigger reservationless mode, save
2942 	 * the errno if there isn't any free space in the target directory.
2943 	 */
2944 	if (spaceres != 0) {
2945 		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
2946 				0, false);
2947 		if (error == -EDQUOT || error == -ENOSPC) {
2948 			if (!retried) {
2949 				xfs_trans_cancel(tp);
2950 				xfs_blockgc_free_quota(target_dp, 0);
2951 				retried = true;
2952 				goto retry;
2953 			}
2954 
2955 			nospace_error = error;
2956 			spaceres = 0;
2957 			error = 0;
2958 		}
2959 		if (error)
2960 			goto out_trans_cancel;
2961 	}
2962 
2963 	/*
2964 	 * Check for expected errors before we dirty the transaction
2965 	 * so we can return an error without a transaction abort.
2966 	 */
2967 	if (target_ip == NULL) {
2968 		/*
2969 		 * If there's no space reservation, check the entry will
2970 		 * fit before actually inserting it.
2971 		 */
2972 		if (!spaceres) {
2973 			error = xfs_dir_canenter(tp, target_dp, target_name);
2974 			if (error)
2975 				goto out_trans_cancel;
2976 		}
2977 	} else {
2978 		/*
2979 		 * If target exists and it's a directory, check whether
2980 		 * it can be destroyed.
2981 		 */
2982 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
2983 		    (!xfs_dir_isempty(target_ip) ||
2984 		     (VFS_I(target_ip)->i_nlink > 2))) {
2985 			error = -EEXIST;
2986 			goto out_trans_cancel;
2987 		}
2988 	}
2989 
2990 	/*
2991 	 * Lock the AGI buffers we need to handle bumping the nlink of the
2992 	 * whiteout inode off the unlinked list and to handle dropping the
2993 	 * nlink of the target inode.  Per locking order rules, do this in
2994 	 * increasing AG order and before directory block allocation tries to
2995 	 * grab AGFs because we grab AGIs before AGFs.
2996 	 *
2997 	 * The (vfs) caller must ensure that if src is a directory then
2998 	 * target_ip is either null or an empty directory.
2999 	 */
3000 	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
3001 		if (inodes[i] == wip ||
3002 		    (inodes[i] == target_ip &&
3003 		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
3004 			struct xfs_perag	*pag;
3005 			struct xfs_buf		*bp;
3006 
3007 			pag = xfs_perag_get(mp,
3008 					XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
3009 			error = xfs_read_agi(pag, tp, &bp);
3010 			xfs_perag_put(pag);
3011 			if (error)
3012 				goto out_trans_cancel;
3013 		}
3014 	}
3015 
3016 	/*
3017 	 * Directory entry creation below may acquire the AGF. Remove
3018 	 * the whiteout from the unlinked list first to preserve correct
3019 	 * AGI/AGF locking order. This dirties the transaction so failures
3020 	 * after this point will abort and log recovery will clean up the
3021 	 * mess.
3022 	 *
3023 	 * For whiteouts, we need to bump the link count on the whiteout
3024 	 * inode. After this point, we have a real link, clear the tmpfile
3025 	 * state flag from the inode so it doesn't accidentally get misused
3026 	 * in future.
3027 	 */
3028 	if (wip) {
3029 		struct xfs_perag	*pag;
3030 
3031 		ASSERT(VFS_I(wip)->i_nlink == 0);
3032 
3033 		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3034 		error = xfs_iunlink_remove(tp, pag, wip);
3035 		xfs_perag_put(pag);
3036 		if (error)
3037 			goto out_trans_cancel;
3038 
3039 		xfs_bumplink(tp, wip);
3040 		VFS_I(wip)->i_state &= ~I_LINKABLE;
3041 	}
3042 
3043 	/*
3044 	 * Set up the target.
3045 	 */
3046 	if (target_ip == NULL) {
3047 		/*
3048 		 * If target does not exist and the rename crosses
3049 		 * directories, adjust the target directory link count
3050 		 * to account for the ".." reference from the new entry.
3051 		 */
3052 		error = xfs_dir_createname(tp, target_dp, target_name,
3053 					   src_ip->i_ino, spaceres);
3054 		if (error)
3055 			goto out_trans_cancel;
3056 
3057 		xfs_trans_ichgtime(tp, target_dp,
3058 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3059 
3060 		if (new_parent && src_is_directory) {
3061 			xfs_bumplink(tp, target_dp);
3062 		}
3063 	} else { /* target_ip != NULL */
3064 		/*
3065 		 * Link the source inode under the target name.
3066 		 * If the source inode is a directory and we are moving
3067 		 * it across directories, its ".." entry will be
3068 		 * inconsistent until we replace that down below.
3069 		 *
3070 		 * In case there is already an entry with the same
3071 		 * name at the destination directory, remove it first.
3072 		 */
3073 		error = xfs_dir_replace(tp, target_dp, target_name,
3074 					src_ip->i_ino, spaceres);
3075 		if (error)
3076 			goto out_trans_cancel;
3077 
3078 		xfs_trans_ichgtime(tp, target_dp,
3079 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3080 
3081 		/*
3082 		 * Decrement the link count on the target since the target
3083 		 * dir no longer points to it.
3084 		 */
3085 		error = xfs_droplink(tp, target_ip);
3086 		if (error)
3087 			goto out_trans_cancel;
3088 
3089 		if (src_is_directory) {
3090 			/*
3091 			 * Drop the link from the old "." entry.
3092 			 */
3093 			error = xfs_droplink(tp, target_ip);
3094 			if (error)
3095 				goto out_trans_cancel;
3096 		}
3097 	} /* target_ip != NULL */
3098 
3099 	/*
3100 	 * Remove the source.
3101 	 */
3102 	if (new_parent && src_is_directory) {
3103 		/*
3104 		 * Rewrite the ".." entry to point to the new
3105 		 * directory.
3106 		 */
3107 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3108 					target_dp->i_ino, spaceres);
3109 		ASSERT(error != -EEXIST);
3110 		if (error)
3111 			goto out_trans_cancel;
3112 	}
3113 
3114 	/*
3115 	 * We always want to hit the ctime on the source inode.
3116 	 *
3117 	 * This isn't strictly required by the standards since the source
3118 	 * inode isn't really being changed, but old unix file systems did
3119 	 * it and some incremental backup programs won't work without it.
3120 	 */
3121 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3122 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3123 
3124 	/*
3125 	 * Adjust the link count on src_dp.  This is necessary when
3126 	 * renaming a directory, either within one parent when
3127 	 * the target existed, or across two parent directories.
3128 	 */
3129 	if (src_is_directory && (new_parent || target_ip != NULL)) {
3130 
3131 		/*
3132 		 * Decrement link count on src_directory since the
3133 		 * entry that's moved no longer points to it.
3134 		 */
3135 		error = xfs_droplink(tp, src_dp);
3136 		if (error)
3137 			goto out_trans_cancel;
3138 	}
3139 
3140 	/*
3141 	 * For whiteouts, we only need to update the source dirent with the
3142 	 * inode number of the whiteout inode rather than removing it
3143 	 * altogether.
3144 	 */
3145 	if (wip)
3146 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3147 					spaceres);
3148 	else
3149 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3150 					   spaceres);
3151 
3152 	if (error)
3153 		goto out_trans_cancel;
3154 
3155 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3156 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3157 	if (new_parent)
3158 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3159 
3160 	error = xfs_finish_rename(tp);
3161 	if (wip)
3162 		xfs_irele(wip);
3163 	return error;
3164 
3165 out_trans_cancel:
3166 	xfs_trans_cancel(tp);
3167 out_release_wip:
3168 	if (wip)
3169 		xfs_irele(wip);
3170 	if (error == -ENOSPC && nospace_error)
3171 		error = nospace_error;
3172 	return error;
3173 }
3174 
3175 static int
3176 xfs_iflush(
3177 	struct xfs_inode	*ip,
3178 	struct xfs_buf		*bp)
3179 {
3180 	struct xfs_inode_log_item *iip = ip->i_itemp;
3181 	struct xfs_dinode	*dip;
3182 	struct xfs_mount	*mp = ip->i_mount;
3183 	int			error;
3184 
3185 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3186 	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3187 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3188 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3189 	ASSERT(iip->ili_item.li_buf == bp);
3190 
3191 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3192 
3193 	/*
3194 	 * We don't flush the inode if any of the following checks fail, but we
3195 	 * do still update the log item and attach to the backing buffer as if
3196 	 * the flush happened. This is a formality to facilitate predictable
3197 	 * error handling as the caller will shut down and fail the buffer.
3198 	 */
3199 	error = -EFSCORRUPTED;
3200 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3201 			       mp, XFS_ERRTAG_IFLUSH_1)) {
3202 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3203 			"%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
3204 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3205 		goto flush_out;
3206 	}
3207 	if (S_ISREG(VFS_I(ip)->i_mode)) {
3208 		if (XFS_TEST_ERROR(
3209 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3210 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3211 		    mp, XFS_ERRTAG_IFLUSH_3)) {
3212 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3213 				"%s: Bad regular inode %llu, ptr "PTR_FMT,
3214 				__func__, ip->i_ino, ip);
3215 			goto flush_out;
3216 		}
3217 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3218 		if (XFS_TEST_ERROR(
3219 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3220 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3221 		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3222 		    mp, XFS_ERRTAG_IFLUSH_4)) {
3223 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3224 				"%s: Bad directory inode %llu, ptr "PTR_FMT,
3225 				__func__, ip->i_ino, ip);
3226 			goto flush_out;
3227 		}
3228 	}
3229 	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
3230 				ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3231 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3232 			"%s: detected corrupt incore inode %llu, "
3233 			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
3234 			__func__, ip->i_ino,
3235 			ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
3236 			ip->i_nblocks, ip);
3237 		goto flush_out;
3238 	}
3239 	if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3240 				mp, XFS_ERRTAG_IFLUSH_6)) {
3241 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3242 			"%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
3243 			__func__, ip->i_ino, ip->i_forkoff, ip);
3244 		goto flush_out;
3245 	}
3246 
3247 	/*
3248 	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3249 	 * count for correct sequencing.  We bump the flush iteration count so
3250 	 * we can detect flushes which postdate a log record during recovery.
3251 	 * This is redundant as we now log every change and hence this can't
3252 	 * happen, but we still need to do it to ensure backwards compatibility
3253 	 * with old kernels that predate logging all inode changes.
3254 	 */
3255 	if (!xfs_has_v3inodes(mp))
3256 		ip->i_flushiter++;
3257 
3258 	/*
3259 	 * If there are inline format data / attr forks attached to this inode,
3260 	 * make sure they are not corrupt.
3261 	 */
3262 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3263 	    xfs_ifork_verify_local_data(ip))
3264 		goto flush_out;
3265 	if (xfs_inode_has_attr_fork(ip) &&
3266 	    ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
3267 	    xfs_ifork_verify_local_attr(ip))
3268 		goto flush_out;
3269 
3270 	/*
3271 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3272 	 * copy out the core of the inode, because if the inode is dirty at all
3273 	 * the core must be.
3274 	 */
3275 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3276 
3277 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3278 	if (!xfs_has_v3inodes(mp)) {
3279 		if (ip->i_flushiter == DI_MAX_FLUSH)
3280 			ip->i_flushiter = 0;
3281 	}
3282 
3283 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3284 	if (xfs_inode_has_attr_fork(ip))
3285 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3286 
3287 	/*
3288 	 * We've recorded everything logged in the inode, so we'd like to clear
3289 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3290 	 * However, we can't stop logging all this information until the data
3291 	 * we've copied into the disk buffer is written to disk.  If we did we
3292 	 * might overwrite the copy of the inode in the log with all the data
3293 	 * after re-logging only part of it, and in the face of a crash we
3294 	 * wouldn't have all the data we need to recover.
3295 	 *
3296 	 * What we do is move the bits to the ili_last_fields field.  When
3297 	 * logging the inode, these bits are moved back to the ili_fields field.
3298 	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3299 	 * we know that the information those bits represent is permanently on
3300 	 * disk.  As long as the flush completes before the inode is logged
3301 	 * again, then both ili_fields and ili_last_fields will be cleared.
3302 	 */
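
	/*
	 * Illustrative timeline of the handoff described above (a simplified
	 * view, not additional logic):
	 *
	 *	xfs_trans_log_inode()	ili_fields |= <dirty flags>
	 *	xfs_iflush()		ili_last_fields = ili_fields;
	 *				ili_fields = 0;
	 *	relog before IO done	ili_last_fields moved back to ili_fields
	 *	xfs_buf_inode_iodone()	ili_last_fields = 0 (flush complete)
	 */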
3303 	error = 0;
3304 flush_out:
3305 	spin_lock(&iip->ili_lock);
3306 	iip->ili_last_fields = iip->ili_fields;
3307 	iip->ili_fields = 0;
3308 	iip->ili_fsync_fields = 0;
3309 	spin_unlock(&iip->ili_lock);
3310 
3311 	/*
3312 	 * Store the current LSN of the inode so that we can tell whether the
3313 	 * item has moved in the AIL from xfs_buf_inode_iodone().
3314 	 */
3315 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3316 				&iip->ili_item.li_lsn);
3317 
3318 	/* generate the checksum. */
3319 	xfs_dinode_calc_crc(mp, dip);
3320 	return error;
3321 }
3322 
3323 /*
3324  * Non-blocking flush of dirty inode metadata into the backing buffer.
3325  *
3326  * The caller must have a reference to the inode and hold the cluster buffer
3327  * locked. The function will walk all the inodes attached to the cluster
3328  * buffer that it can lock without blocking, and flush them to the cluster buffer.
3329  *
3330  * On successful flushing of at least one inode, the caller must write out the
3331  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3332  * the caller needs to release the buffer. On failure, the filesystem will be
3333  * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
3334  * will be returned.
3335  */
3336 int
xfs_iflush_cluster(struct xfs_buf * bp)3337 xfs_iflush_cluster(
3338 	struct xfs_buf		*bp)
3339 {
3340 	struct xfs_mount	*mp = bp->b_mount;
3341 	struct xfs_log_item	*lip, *n;
3342 	struct xfs_inode	*ip;
3343 	struct xfs_inode_log_item *iip;
3344 	int			clcount = 0;
3345 	int			error = 0;
3346 
	/*
	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
	 * will remove itself from the list.
	 */
	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
		iip = (struct xfs_inode_log_item *)lip;
		ip = iip->ili_inode;

		/*
		 * Quick and dirty check to avoid locks if possible.
		 */
		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
			continue;
		if (xfs_ipincount(ip))
			continue;

		/*
		 * The inode is still attached to the buffer, which means it is
		 * dirty but reclaim might try to grab it. Check carefully for
		 * that, and grab the ilock while still holding the i_flags_lock
		 * to guarantee reclaim will not be able to reclaim this inode
		 * once we drop the i_flags_lock.
		 */
		spin_lock(&ip->i_flags_lock);
		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
			spin_unlock(&ip->i_flags_lock);
			continue;
		}

		/*
		 * ILOCK will pin the inode against reclaim and prevent
		 * concurrent transactions modifying the inode while we are
		 * flushing the inode. If we get the lock, set the flushing
		 * state before we drop the i_flags_lock.
		 */
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
			spin_unlock(&ip->i_flags_lock);
			continue;
		}
		__xfs_iflags_set(ip, XFS_IFLUSHING);
		spin_unlock(&ip->i_flags_lock);

		/*
		 * Abort flushing this inode if we are shut down because the
		 * inode may not currently be in the AIL. This can occur when
		 * log I/O failure unpins the inode without inserting into the
		 * AIL, leaving a dirty/unpinned inode attached to the buffer
		 * that otherwise looks like it should be flushed.
		 */
		if (xlog_is_shutdown(mp->m_log)) {
			xfs_iunpin_wait(ip);
			xfs_iflush_abort(ip);
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			error = -EIO;
			continue;
		}

		/* don't block waiting on a log force to unpin dirty inodes */
		if (xfs_ipincount(ip)) {
			xfs_iflags_clear(ip, XFS_IFLUSHING);
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			continue;
		}

		if (!xfs_inode_clean(ip))
			error = xfs_iflush(ip, bp);
		else
			xfs_iflags_clear(ip, XFS_IFLUSHING);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		if (error)
			break;
		clcount++;
	}

	if (error) {
		/*
		 * Shutdown first so we kill the log before we release this
		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
		 * of the log, failing it before the _log_ is shut down can
		 * result in the log tail being moved forward in the journal
		 * on disk because log writes can still be taking place. Hence
		 * unpinning the tail will allow the ICREATE intent to be
		 * removed from the log and recovery will fail with
		 * uninitialised inode cluster buffers.
		 */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
		return error;
	}

	if (!clcount)
		return -EAGAIN;

	XFS_STATS_INC(mp, xs_icluster_flushcnt);
	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
	return 0;
}

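/*
 * Illustrative sketch only (hypothetical helper): the contract a caller of
 * xfs_iflush_cluster() is expected to follow, loosely modelled on the AIL
 * push path.  On success the buffer is queued for delayed write and
 * released; on -EAGAIN nothing was flushed and the caller just releases
 * the buffer; on any other error the buffer has already been released.
 */
STATIC int
example_flush_and_submit(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	int			error;

	error = xfs_iflush_cluster(bp);
	if (!error) {
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		return 0;
	}
	if (error == -EAGAIN)
		xfs_buf_relse(bp);
	return error;
}
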
/* Release an inode. */
void
xfs_irele(
	struct xfs_inode	*ip)
{
	trace_xfs_irele(ip, _RET_IP_);
	iput(VFS_I(ip));
}

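/*
 * Illustrative sketch only (hypothetical helper): the usual lookup/release
 * pairing.  Every successful xfs_iget() takes a reference that must
 * eventually be dropped with xfs_irele().
 */
STATIC int
example_stat_inode(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;
	/* ... examine the inode under the appropriate locks ... */
	xfs_irele(ip);
	return 0;
}
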
/*
 * Ensure all committed transactions touching the inode are written to the log.
 */
int
xfs_log_force_inode(
	struct xfs_inode	*ip)
{
	xfs_csn_t		seq = 0;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		seq = ip->i_itemp->ili_commit_seq;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!seq)
		return 0;
	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
}

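/*
 * Illustrative sketch only (hypothetical helper): a minimal metadata
 * stability hook in the style of an NFS commit_metadata callback.  If the
 * inode is pinned by committed-but-unstable changes, this forces the log
 * up to the inode's commit sequence before returning.
 */
STATIC int
example_commit_metadata(
	struct xfs_inode	*ip)
{
	return xfs_log_force_inode(ip);
}
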
/*
 * Grab the exclusive iolock for a data copy from src to dest, making sure to
 * abide by the vfs locking order (lowest pointer value goes first) and to
 * break the layout leases before proceeding.  The loop is needed because we
 * cannot call the blocking break_layout() with the iolocks held, and
 * therefore have to back out both locks.
 */
static int
xfs_iolock_two_inodes_and_break_layout(
	struct inode		*src,
	struct inode		*dest)
{
	int			error;

	if (src > dest)
		swap(src, dest);

retry:
	/* Wait to break both inodes' layouts before we start locking. */
	error = break_layout(src, true);
	if (error)
		return error;
	if (src != dest) {
		error = break_layout(dest, true);
		if (error)
			return error;
	}

	/* Lock one inode and make sure nobody got in and leased it. */
	inode_lock(src);
	error = break_layout(src, false);
	if (error) {
		inode_unlock(src);
		if (error == -EWOULDBLOCK)
			goto retry;
		return error;
	}

	if (src == dest)
		return 0;

	/* Lock the other inode and make sure nobody got in and leased it. */
	inode_lock_nested(dest, I_MUTEX_NONDIR2);
	error = break_layout(dest, false);
	if (error) {
		inode_unlock(src);
		inode_unlock(dest);
		if (error == -EWOULDBLOCK)
			goto retry;
		return error;
	}

	return 0;
}

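/*
 * Illustrative sketch only: why sorting by pointer value matters.  Without
 * a global ordering, one task could lock (A, B) while another locks (B, A),
 * each then blocking forever on the other's second lock.  Sorting both
 * callers into the same acquisition order, as above, rules out that ABBA
 * deadlock.  This hypothetical helper shows just the ordering convention,
 * without the layout-break retry loop.
 */
static inline void
example_lock_two_inodes(
	struct inode		*a,
	struct inode		*b)
{
	if (a > b)
		swap(a, b);	/* lowest pointer value locks first */
	inode_lock(a);
	if (a != b)
		inode_lock_nested(b, I_MUTEX_NONDIR2);
}
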
static int
xfs_mmaplock_two_inodes_and_break_dax_layout(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			error;
	bool			retry;
	struct page		*page;

	if (ip1->i_ino > ip2->i_ino)
		swap(ip1, ip2);

again:
	retry = false;
	/* Lock the first inode. */
	xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
	error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
	if (error || retry) {
		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
		if (error == 0 && retry)
			goto again;
		return error;
	}

	if (ip1 == ip2)
		return 0;

	/* Nested-lock the second inode. */
	xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
	/*
	 * We cannot use xfs_break_dax_layouts() directly here because it may
	 * need to unlock and relock the XFS_MMAPLOCK_EXCL, which is not
	 * suitable for this nested lock case.
	 */
	page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
	if (page && page_ref_count(page) != 1) {
		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
		goto again;
	}

	return 0;
}

/*
 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
 * mmap activity.
 */
int
xfs_ilock2_io_mmap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	int			ret;

	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
	if (ret)
		return ret;

	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
		ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
		if (ret) {
			inode_unlock(VFS_I(ip2));
			if (ip1 != ip2)
				inode_unlock(VFS_I(ip1));
			return ret;
		}
	} else {
		filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
					    VFS_I(ip2)->i_mapping);
	}

	return 0;
}

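/*
 * Illustrative sketch only (hypothetical helper): the intended pairing.
 * A remap-style operation takes both inodes' IO and mmap locks up front,
 * does its work with all file I/O and page faults excluded, and then
 * drops both sets of locks again.
 */
STATIC int
example_remap_range(
	struct xfs_inode	*src,
	struct xfs_inode	*dest)
{
	int			error;

	error = xfs_ilock2_io_mmap(src, dest);
	if (error)
		return error;
	/* ... flush dirty page cache and remap extents here ... */
	xfs_iunlock2_io_mmap(src, dest);
	return 0;
}
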
/* Unlock both inodes to allow I/O and mmap activity. */
void
xfs_iunlock2_io_mmap(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2)
{
	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
		if (ip1 != ip2)
			xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
	} else {
		filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
					      VFS_I(ip2)->i_mapping);
	}

	inode_unlock(VFS_I(ip2));
	if (ip1 != ip2)
		inode_unlock(VFS_I(ip1));
}

/*
 * Reload the incore unlinked list for this inode.  The caller should ensure
 * that the link count cannot change, either by taking ILOCK_SHARED or
 * otherwise preventing other threads from executing.
 */
int
xfs_inode_reload_unlinked_bucket(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*agibp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	xfs_agino_t		prev_agino, next_agino;
	unsigned int		bucket;
	bool			foundit = false;
	int			error;

	/* Grab the first inode in the list. */
	pag = xfs_perag_get(mp, agno);
	error = xfs_ialloc_read_agi(pag, tp, &agibp);
	xfs_perag_put(pag);
	if (error)
		return error;

	/*
	 * We've taken ILOCK_SHARED and the AGI buffer lock to stabilize the
	 * incore unlinked list pointers for this inode.  Check once more to
	 * see if we raced with anyone else to reload the unlinked list.
	 */
	if (!xfs_inode_unlinked_incomplete(ip)) {
		foundit = true;
		goto out_agibp;
	}

	bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
	agi = agibp->b_addr;

	trace_xfs_inode_reload_unlinked_bucket(ip);

	xfs_info_ratelimited(mp,
 "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating list recovery.",
			agino, agno);

	prev_agino = NULLAGINO;
	next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
	while (next_agino != NULLAGINO) {
		struct xfs_inode	*next_ip = NULL;

		/* Found this caller's inode; set its backlink. */
		if (next_agino == agino) {
			next_ip = ip;
			next_ip->i_prev_unlinked = prev_agino;
			foundit = true;
			goto next_inode;
		}

		/* Try in-memory lookup first. */
		next_ip = xfs_iunlink_lookup(pag, next_agino);
		if (next_ip)
			goto next_inode;

		/* Inode not in memory, try reloading it. */
		error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
				next_agino);
		if (error)
			break;

		/* Grab the reloaded inode. */
		next_ip = xfs_iunlink_lookup(pag, next_agino);
		if (!next_ip) {
			/* No incore inode at all?  We reloaded it... */
			ASSERT(next_ip != NULL);
			error = -EFSCORRUPTED;
			break;
		}

next_inode:
		prev_agino = next_agino;
		next_agino = next_ip->i_next_unlinked;
	}

out_agibp:
	xfs_trans_brelse(tp, agibp);
	/* Should have found this inode somewhere in the iunlinked bucket. */
	if (!error && !foundit)
		error = -EFSCORRUPTED;
	return error;
}

/* Decide if this inode is missing its unlinked list and reload it. */
int
xfs_inode_reload_unlinked(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(ip->i_mount, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_unlinked_incomplete(ip))
		error = xfs_inode_reload_unlinked_bucket(tp, ip);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_trans_cancel(tp);

	return error;
}
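
/*
 * Illustrative sketch only (hypothetical helper): how a reader such as an
 * inode scanner might use the reload helpers above.  If this inode was
 * loaded from disk without its incore unlinked list pointers, rebuild
 * them before trusting the link count.
 */
STATIC int
example_ensure_unlinked_list(
	struct xfs_inode	*ip)
{
	if (!xfs_inode_unlinked_incomplete(ip))
		return 0;
	return xfs_inode_reload_unlinked(ip);
}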