1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_shared.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_btree.h"
18 #include "xfs_rmap.h"
19 #include "xfs_alloc_btree.h"
20 #include "xfs_alloc.h"
21 #include "xfs_extent_busy.h"
22 #include "xfs_errortag.h"
23 #include "xfs_error.h"
24 #include "xfs_cksum.h"
25 #include "xfs_trace.h"
26 #include "xfs_trans.h"
27 #include "xfs_buf_item.h"
28 #include "xfs_log.h"
29 #include "xfs_ag_resv.h"
30 #include "xfs_bmap.h"
31 
32 extern kmem_zone_t	*xfs_bmap_free_item_zone;
33 
34 struct workqueue_struct *xfs_alloc_wq;
35 
36 #define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
37 
38 #define	XFSA_FIXUP_BNO_OK	1
39 #define	XFSA_FIXUP_CNT_OK	2
40 
41 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
42 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
43 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
44 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
45 		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
46 
47 /*
48  * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots at
49  * the beginning of the block for a proper header with the location information
50  * and CRC.
51  */
52 unsigned int
53 xfs_agfl_size(
54 	struct xfs_mount	*mp)
55 {
56 	unsigned int		size = mp->m_sb.sb_sectsize;
57 
58 	if (xfs_sb_version_hascrc(&mp->m_sb))
59 		size -= sizeof(struct xfs_agfl);
60 
61 	return size / sizeof(xfs_agblock_t);
62 }
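/*
 * Worked example (an illustrative sketch, not part of the build): the same
 * arithmetic as xfs_agfl_size() with concrete, assumed numbers. On a V5
 * (CRC) filesystem with 512-byte sectors, the struct xfs_agfl header takes
 * 36 bytes, leaving (512 - 36) / sizeof(xfs_agblock_t) = 476 / 4 = 119
 * AGFL slots; without CRCs the whole sector holds 512 / 4 = 128 slots.
 */
#if 0
static unsigned int example_agfl_slots(unsigned int sectsize, bool has_crc)
{
	unsigned int size = sectsize;	/* whole sector by default */

	if (has_crc)
		size -= 36;		/* assumed sizeof(struct xfs_agfl) */
	return size / 4;		/* 4 == sizeof(xfs_agblock_t) */
}
/* example_agfl_slots(512, true) == 119, example_agfl_slots(512, false) == 128 */
#endif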
63 
64 unsigned int
65 xfs_refc_block(
66 	struct xfs_mount	*mp)
67 {
68 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
69 		return XFS_RMAP_BLOCK(mp) + 1;
70 	if (xfs_sb_version_hasfinobt(&mp->m_sb))
71 		return XFS_FIBT_BLOCK(mp) + 1;
72 	return XFS_IBT_BLOCK(mp) + 1;
73 }
74 
75 xfs_extlen_t
76 xfs_prealloc_blocks(
77 	struct xfs_mount	*mp)
78 {
79 	if (xfs_sb_version_hasreflink(&mp->m_sb))
80 		return xfs_refc_block(mp) + 1;
81 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
82 		return XFS_RMAP_BLOCK(mp) + 1;
83 	if (xfs_sb_version_hasfinobt(&mp->m_sb))
84 		return XFS_FIBT_BLOCK(mp) + 1;
85 	return XFS_IBT_BLOCK(mp) + 1;
86 }
87 
88 /*
89  * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
90  * AGF buffer (PV 947395), we place constraints on the relationship among
91  * actual allocations for data blocks, freelist blocks, and potential file data
92  * bmap btree blocks. However, these restrictions may result in no actual space
93  * allocated for a delayed extent, for example, a data block in a certain AG is
94  * allocated but there is no additional block for the additional bmap btree
95  * block due to a split of the bmap btree of the file. The result of this may
96  * lead to an infinite loop when the file gets flushed to disk and all delayed
97  * extents need to be actually allocated. To get around this, we explicitly set
98  * aside a few blocks which will not be reserved in delayed allocation.
99  *
100  * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
101  * potential split of the file's bmap btree.
102  */
103 unsigned int
104 xfs_alloc_set_aside(
105 	struct xfs_mount	*mp)
106 {
107 	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
108 }
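/*
 * Worked example (hypothetical geometry): with XFS_ALLOC_AGFL_RESERVE at
 * its usual value of 4, each AG sets aside 4 + 4 = 8 blocks, so a
 * filesystem with sb_agcount == 16 withholds 16 * 8 = 128 blocks from the
 * space that delayed allocation may reserve.
 */
#if 0
unsigned int set_aside = 16 * (4 + 4);	/* agcount * (AGFL reserve + bmbt split) */
/* set_aside == 128 blocks never handed out to delalloc reservations */
#endif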
109 
110 /*
111  * When deciding how much space to allocate out of an AG, we limit the
112  * allocation maximum size to the size of the AG. However, we cannot use all the
113  * blocks in the AG - some are permanently used by metadata. These
114  * blocks are generally:
115  *	- the AG superblock, AGF, AGI and AGFL
116  *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
117  *	  the AGI free inode and rmap btree root blocks.
118  *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
119  *	- the rmapbt root block
120  *
121  * The AG headers are sector sized, so the amount of space they take up is
122  * dependent on filesystem geometry. The others are all single blocks.
123  */
124 unsigned int
125 xfs_alloc_ag_max_usable(
126 	struct xfs_mount	*mp)
127 {
128 	unsigned int		blocks;
129 
130 	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
131 	blocks += XFS_ALLOC_AGFL_RESERVE;
132 	blocks += 3;			/* AGF, AGI btree root blocks */
133 	if (xfs_sb_version_hasfinobt(&mp->m_sb))
134 		blocks++;		/* finobt root block */
135 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
136 		blocks++; 		/* rmap root block */
137 	if (xfs_sb_version_hasreflink(&mp->m_sb))
138 		blocks++;		/* refcount root block */
139 
140 	return mp->m_sb.sb_agblocks - blocks;
141 }
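/*
 * Worked example (assumed geometry): with 4096-byte blocks and 512-byte
 * sectors, the four sector-sized headers round up to one filesystem block.
 * Adding the AGFL reserve (4), the three always-present btree roots, and
 * one root each for finobt, rmapbt and refcountbt gives
 * 1 + 4 + 3 + 1 + 1 + 1 = 11 blocks, so an AG of 1048576 blocks can serve
 * at most 1048565 of them from a single allocation.
 */
#if 0
unsigned int blocks = 1 /* headers */ + 4 /* AGFL reserve */ + 3 + 1 + 1 + 1;
unsigned int max_usable = 1048576 - blocks;	/* == 1048565 */
#endif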
142 
143 /*
144  * Lookup the record equal to [bno, len] in the btree given by cur.
145  */
146 STATIC int				/* error */
147 xfs_alloc_lookup_eq(
148 	struct xfs_btree_cur	*cur,	/* btree cursor */
149 	xfs_agblock_t		bno,	/* starting block of extent */
150 	xfs_extlen_t		len,	/* length of extent */
151 	int			*stat)	/* success/failure */
152 {
153 	cur->bc_rec.a.ar_startblock = bno;
154 	cur->bc_rec.a.ar_blockcount = len;
155 	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
156 }
157 
158 /*
159  * Lookup the first record greater than or equal to [bno, len]
160  * in the btree given by cur.
161  */
162 int				/* error */
163 xfs_alloc_lookup_ge(
164 	struct xfs_btree_cur	*cur,	/* btree cursor */
165 	xfs_agblock_t		bno,	/* starting block of extent */
166 	xfs_extlen_t		len,	/* length of extent */
167 	int			*stat)	/* success/failure */
168 {
169 	cur->bc_rec.a.ar_startblock = bno;
170 	cur->bc_rec.a.ar_blockcount = len;
171 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
172 }
173 
174 /*
175  * Lookup the first record less than or equal to [bno, len]
176  * in the btree given by cur.
177  */
178 int					/* error */
179 xfs_alloc_lookup_le(
180 	struct xfs_btree_cur	*cur,	/* btree cursor */
181 	xfs_agblock_t		bno,	/* starting block of extent */
182 	xfs_extlen_t		len,	/* length of extent */
183 	int			*stat)	/* success/failure */
184 {
185 	cur->bc_rec.a.ar_startblock = bno;
186 	cur->bc_rec.a.ar_blockcount = len;
187 	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
188 }
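/*
 * Sketch of the lookup semantics (hypothetical records, not compiled):
 * given by-bno records {20, 5} and {40, 8}, an LE lookup positions the
 * cursor on the closest record starting at or before the key, a GE lookup
 * on the closest record starting at or after it, and *stat is set to 0
 * when no record satisfies the comparison.
 */
#if 0
int error, stat;

error = xfs_alloc_lookup_le(bno_cur, 25, 1, &stat);
/* cursor points at {20, 5}, stat == 1 */
error = xfs_alloc_lookup_ge(bno_cur, 25, 1, &stat);
/* cursor points at {40, 8}, stat == 1 */
error = xfs_alloc_lookup_ge(bno_cur, 50, 1, &stat);
/* nothing starts at or after block 50, stat == 0 */
#endif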
189 
190 /*
191  * Update the record referred to by cur to the value given
192  * by [bno, len].
193  * This either works (return 0) or gets an EFSCORRUPTED error.
194  */
195 STATIC int				/* error */
196 xfs_alloc_update(
197 	struct xfs_btree_cur	*cur,	/* btree cursor */
198 	xfs_agblock_t		bno,	/* starting block of extent */
199 	xfs_extlen_t		len)	/* length of extent */
200 {
201 	union xfs_btree_rec	rec;
202 
203 	rec.alloc.ar_startblock = cpu_to_be32(bno);
204 	rec.alloc.ar_blockcount = cpu_to_be32(len);
205 	return xfs_btree_update(cur, &rec);
206 }
207 
208 /*
209  * Get the data from the pointed-to record.
210  */
211 int					/* error */
212 xfs_alloc_get_rec(
213 	struct xfs_btree_cur	*cur,	/* btree cursor */
214 	xfs_agblock_t		*bno,	/* output: starting block of extent */
215 	xfs_extlen_t		*len,	/* output: length of extent */
216 	int			*stat)	/* output: success/failure */
217 {
218 	struct xfs_mount	*mp = cur->bc_mp;
219 	xfs_agnumber_t		agno = cur->bc_private.a.agno;
220 	union xfs_btree_rec	*rec;
221 	int			error;
222 
223 	error = xfs_btree_get_rec(cur, &rec, stat);
224 	if (error || !(*stat))
225 		return error;
226 
227 	*bno = be32_to_cpu(rec->alloc.ar_startblock);
228 	*len = be32_to_cpu(rec->alloc.ar_blockcount);
229 
230 	if (*len == 0)
231 		goto out_bad_rec;
232 
233 	/* check for valid extent range, including overflow */
234 	if (!xfs_verify_agbno(mp, agno, *bno))
235 		goto out_bad_rec;
236 	if (*bno > *bno + *len)
237 		goto out_bad_rec;
238 	if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
239 		goto out_bad_rec;
240 
241 	return 0;
242 
243 out_bad_rec:
244 	xfs_warn(mp,
245 		"%s Freespace BTree record corruption in AG %d detected!",
246 		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
247 	xfs_warn(mp,
248 		"start block 0x%x block count 0x%x", *bno, *len);
249 	return -EFSCORRUPTED;
250 }
251 
252 /*
253  * Compute aligned version of the found extent.
254  * Takes alignment and min length into account.
255  */
256 STATIC bool
257 xfs_alloc_compute_aligned(
258 	xfs_alloc_arg_t	*args,		/* allocation argument structure */
259 	xfs_agblock_t	foundbno,	/* starting block in found extent */
260 	xfs_extlen_t	foundlen,	/* length in found extent */
261 	xfs_agblock_t	*resbno,	/* result block number */
262 	xfs_extlen_t	*reslen,	/* result length */
263 	unsigned	*busy_gen)
264 {
265 	xfs_agblock_t	bno = foundbno;
266 	xfs_extlen_t	len = foundlen;
267 	xfs_extlen_t	diff;
268 	bool		busy;
269 
270 	/* Trim busy sections out of found extent */
271 	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
272 
273 	/*
274 	 * If we have a largish extent that happens to start before min_agbno,
275 	 * see if we can shift it into range...
276 	 */
277 	if (bno < args->min_agbno && bno + len > args->min_agbno) {
278 		diff = args->min_agbno - bno;
279 		if (len > diff) {
280 			bno += diff;
281 			len -= diff;
282 		}
283 	}
284 
285 	if (args->alignment > 1 && len >= args->minlen) {
286 		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);
287 
288 		diff = aligned_bno - bno;
289 
290 		*resbno = aligned_bno;
291 		*reslen = diff >= len ? 0 : len - diff;
292 	} else {
293 		*resbno = bno;
294 		*reslen = len;
295 	}
296 
297 	return busy;
298 }
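/*
 * Worked example (hypothetical numbers): for a found extent [7, 20] with
 * args->alignment == 4, no busy overlap and minlen <= 19, the start rounds
 * up to block 8 and the usable aligned extent is [8, 19]. A tiny extent
 * such as [7, 1] would be consumed entirely by the round-up, leaving
 * *reslen == 0 so callers skip it.
 */
#if 0
unsigned int bno = 7, len = 20, alignment = 4;
unsigned int aligned_bno = ((bno + alignment - 1) / alignment) * alignment; /* 8 */
unsigned int diff = aligned_bno - bno;			/* 1 */
unsigned int reslen = diff >= len ? 0 : len - diff;	/* 19 */
#endif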
299 
300 /*
301  * Compute best start block and diff for "near" allocations.
302  * freelen >= wantlen already checked by caller.
303  */
304 STATIC xfs_extlen_t			/* difference value (absolute) */
305 xfs_alloc_compute_diff(
306 	xfs_agblock_t	wantbno,	/* target starting block */
307 	xfs_extlen_t	wantlen,	/* target length */
308 	xfs_extlen_t	alignment,	/* target alignment */
309 	int		datatype,	/* are we allocating data? */
310 	xfs_agblock_t	freebno,	/* freespace's starting block */
311 	xfs_extlen_t	freelen,	/* freespace's length */
312 	xfs_agblock_t	*newbnop)	/* result: best start block from free */
313 {
314 	xfs_agblock_t	freeend;	/* end of freespace extent */
315 	xfs_agblock_t	newbno1;	/* return block number */
316 	xfs_agblock_t	newbno2;	/* other new block number */
317 	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
318 	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
319 	xfs_agblock_t	wantend;	/* end of target extent */
320 	bool		userdata = xfs_alloc_is_userdata(datatype);
321 
322 	ASSERT(freelen >= wantlen);
323 	freeend = freebno + freelen;
324 	wantend = wantbno + wantlen;
325 	/*
326 	 * We want to allocate from the start of a free extent if it is past
327 	 * the desired block or if we are allocating user data and the free
328 	 * extent is before desired block. The second case is there to allow
329 	 * for contiguous allocation from the remaining free space if the file
330 	 * grows in the short term.
331 	 */
332 	if (freebno >= wantbno || (userdata && freeend < wantend)) {
333 		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
334 			newbno1 = NULLAGBLOCK;
335 	} else if (freeend >= wantend && alignment > 1) {
336 		newbno1 = roundup(wantbno, alignment);
337 		newbno2 = newbno1 - alignment;
338 		if (newbno1 >= freeend)
339 			newbno1 = NULLAGBLOCK;
340 		else
341 			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
342 		if (newbno2 < freebno)
343 			newbno2 = NULLAGBLOCK;
344 		else
345 			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
346 		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
347 			if (newlen1 < newlen2 ||
348 			    (newlen1 == newlen2 &&
349 			     XFS_ABSDIFF(newbno1, wantbno) >
350 			     XFS_ABSDIFF(newbno2, wantbno)))
351 				newbno1 = newbno2;
352 		} else if (newbno2 != NULLAGBLOCK)
353 			newbno1 = newbno2;
354 	} else if (freeend >= wantend) {
355 		newbno1 = wantbno;
356 	} else if (alignment > 1) {
357 		newbno1 = roundup(freeend - wantlen, alignment);
358 		if (newbno1 > freeend - wantlen &&
359 		    newbno1 - alignment >= freebno)
360 			newbno1 -= alignment;
361 		else if (newbno1 >= freeend)
362 			newbno1 = NULLAGBLOCK;
363 	} else
364 		newbno1 = freeend - wantlen;
365 	*newbnop = newbno1;
366 	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
367 }
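/*
 * Worked example (hypothetical numbers): asking for [100, 10] with
 * alignment 8 out of free extent [90, 40] (which ends at 130), the aligned
 * candidates are roundup(100, 8) == 104 and 104 - 8 == 96. Both can hold
 * the full 10 blocks and both sit 4 blocks from the target, so the tie
 * keeps the higher candidate: *newbnop == 104 and the returned diff is
 * XFS_ABSDIFF(104, 100) == 4.
 */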
368 
369 /*
370  * Fix up the length, based on mod and prod.
371  * len should be k * prod + mod for some k.
372  * If len is too small it is returned unchanged.
373  * If len hits maxlen it is left alone.
374  */
375 STATIC void
376 xfs_alloc_fix_len(
377 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
378 {
379 	xfs_extlen_t	k;
380 	xfs_extlen_t	rlen;
381 
382 	ASSERT(args->mod < args->prod);
383 	rlen = args->len;
384 	ASSERT(rlen >= args->minlen);
385 	ASSERT(rlen <= args->maxlen);
386 	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
387 	    (args->mod == 0 && rlen < args->prod))
388 		return;
389 	k = rlen % args->prod;
390 	if (k == args->mod)
391 		return;
392 	if (k > args->mod)
393 		rlen = rlen - (k - args->mod);
394 	else
395 		rlen = rlen - args->prod + (args->mod - k);
396 	/* casts to (int) catch length underflows */
397 	if ((int)rlen < (int)args->minlen)
398 		return;
399 	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
400 	ASSERT(rlen % args->prod == args->mod);
401 	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
402 		rlen + args->minleft);
403 	args->len = rlen;
404 }
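/*
 * Worked example (hypothetical values): with prod == 5 and mod == 2 the
 * final length must satisfy len % 5 == 2. For rlen == 14, k == 4 > mod, so
 * rlen becomes 14 - (4 - 2) == 12; for rlen == 11, k == 1 < mod, so rlen
 * becomes 11 - 5 + (2 - 1) == 7. The length is only ever trimmed, never
 * grown, so maxlen still holds, and a trim below minlen leaves args->len
 * untouched instead.
 */
#if 0
unsigned int prod = 5, mod = 2, rlen = 14;
unsigned int k = rlen % prod;			/* 4; k == mod needs no trim */
rlen = k > mod ? rlen - (k - mod)		/* 14 -> 12 */
	       : rlen - prod + (mod - k);
/* rlen % prod == mod now holds: 12 % 5 == 2 */
#endif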
405 
406 /*
407  * Update the two btrees, logically removing from freespace the extent
408  * starting at rbno, rlen blocks.  The extent is contained within the
409  * actual (current) free extent fbno for flen blocks.
410  * Flags are passed in indicating whether the cursors are set to the
411  * relevant records.
412  */
413 STATIC int				/* error code */
414 xfs_alloc_fixup_trees(
415 	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
416 	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
417 	xfs_agblock_t	fbno,		/* starting block of free extent */
418 	xfs_extlen_t	flen,		/* length of free extent */
419 	xfs_agblock_t	rbno,		/* starting block of returned extent */
420 	xfs_extlen_t	rlen,		/* length of returned extent */
421 	int		flags)		/* flags, XFSA_FIXUP_... */
422 {
423 	int		error;		/* error code */
424 	int		i;		/* operation results */
425 	xfs_agblock_t	nfbno1;		/* first new free startblock */
426 	xfs_agblock_t	nfbno2;		/* second new free startblock */
427 	xfs_extlen_t	nflen1=0;	/* first new free length */
428 	xfs_extlen_t	nflen2=0;	/* second new free length */
429 	struct xfs_mount *mp;
430 
431 	mp = cnt_cur->bc_mp;
432 
433 	/*
434 	 * Look up the record in the by-size tree if necessary.
435 	 */
436 	if (flags & XFSA_FIXUP_CNT_OK) {
437 #ifdef DEBUG
438 		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
439 			return error;
440 		XFS_WANT_CORRUPTED_RETURN(mp,
441 			i == 1 && nfbno1 == fbno && nflen1 == flen);
442 #endif
443 	} else {
444 		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
445 			return error;
446 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
447 	}
448 	/*
449 	 * Look up the record in the by-block tree if necessary.
450 	 */
451 	if (flags & XFSA_FIXUP_BNO_OK) {
452 #ifdef DEBUG
453 		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
454 			return error;
455 		XFS_WANT_CORRUPTED_RETURN(mp,
456 			i == 1 && nfbno1 == fbno && nflen1 == flen);
457 #endif
458 	} else {
459 		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
460 			return error;
461 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
462 	}
463 
464 #ifdef DEBUG
465 	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
466 		struct xfs_btree_block	*bnoblock;
467 		struct xfs_btree_block	*cntblock;
468 
469 		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
470 		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
471 
472 		XFS_WANT_CORRUPTED_RETURN(mp,
473 			bnoblock->bb_numrecs == cntblock->bb_numrecs);
474 	}
475 #endif
476 
477 	/*
478 	 * Deal with all four cases: the allocated record is contained
479 	 * within the freespace record, so we can have new freespace
480 	 * at either (or both) end, or no freespace remaining.
481 	 */
482 	if (rbno == fbno && rlen == flen)
483 		nfbno1 = nfbno2 = NULLAGBLOCK;
484 	else if (rbno == fbno) {
485 		nfbno1 = rbno + rlen;
486 		nflen1 = flen - rlen;
487 		nfbno2 = NULLAGBLOCK;
488 	} else if (rbno + rlen == fbno + flen) {
489 		nfbno1 = fbno;
490 		nflen1 = flen - rlen;
491 		nfbno2 = NULLAGBLOCK;
492 	} else {
493 		nfbno1 = fbno;
494 		nflen1 = rbno - fbno;
495 		nfbno2 = rbno + rlen;
496 		nflen2 = (fbno + flen) - nfbno2;
497 	}
498 	/*
499 	 * Delete the entry from the by-size btree.
500 	 */
501 	if ((error = xfs_btree_delete(cnt_cur, &i)))
502 		return error;
503 	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
504 	/*
505 	 * Add new by-size btree entry(s).
506 	 */
507 	if (nfbno1 != NULLAGBLOCK) {
508 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
509 			return error;
510 		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
511 		if ((error = xfs_btree_insert(cnt_cur, &i)))
512 			return error;
513 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
514 	}
515 	if (nfbno2 != NULLAGBLOCK) {
516 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
517 			return error;
518 		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
519 		if ((error = xfs_btree_insert(cnt_cur, &i)))
520 			return error;
521 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
522 	}
523 	/*
524 	 * Fix up the by-block btree entry(s).
525 	 */
526 	if (nfbno1 == NULLAGBLOCK) {
527 		/*
528 		 * No remaining freespace, just delete the by-block tree entry.
529 		 */
530 		if ((error = xfs_btree_delete(bno_cur, &i)))
531 			return error;
532 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
533 	} else {
534 		/*
535 		 * Update the by-block entry to start later|be shorter.
536 		 */
537 		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
538 			return error;
539 	}
540 	if (nfbno2 != NULLAGBLOCK) {
541 		/*
542 		 * 2 resulting free entries, need to add one.
543 		 */
544 		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
545 			return error;
546 		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
547 		if ((error = xfs_btree_insert(bno_cur, &i)))
548 			return error;
549 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
550 	}
551 	return 0;
552 }
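/*
 * Worked example of the four cases handled above (hypothetical numbers),
 * starting from free extent [100, 20]:
 *   - allocate [100, 20]: exact match, the record vanishes from both
 *     trees (nfbno1 == nfbno2 == NULLAGBLOCK);
 *   - allocate [100, 8]:  head cut, the record becomes [108, 12];
 *   - allocate [112, 8]:  tail cut, the record becomes [100, 12];
 *   - allocate [105, 5]:  middle cut, two records remain, [100, 5] and
 *     [110, 10].
 */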
553 
554 static xfs_failaddr_t
555 xfs_agfl_verify(
556 	struct xfs_buf	*bp)
557 {
558 	struct xfs_mount *mp = bp->b_target->bt_mount;
559 	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
560 	int		i;
561 
562 	/*
563 	 * There is no verification of non-crc AGFLs because mkfs does not
564 	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
565 	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
566 	 * can't verify just those entries are valid.
567 	 */
568 	if (!xfs_sb_version_hascrc(&mp->m_sb))
569 		return NULL;
570 
571 	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
572 		return __this_address;
573 	if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
574 		return __this_address;
575 	/*
576 	 * during growfs operations, the perag is not fully initialised,
577 	 * so we can't use it for any useful checking. growfs ensures we can't
578 	 * use it by using uncached buffers that don't have the perag attached
579 	 * so we can detect and avoid this problem.
580 	 */
581 	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
582 		return __this_address;
583 
584 	for (i = 0; i < xfs_agfl_size(mp); i++) {
585 		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
586 		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
587 			return __this_address;
588 	}
589 
590 	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
591 		return __this_address;
592 	return NULL;
593 }
594 
595 static void
596 xfs_agfl_read_verify(
597 	struct xfs_buf	*bp)
598 {
599 	struct xfs_mount *mp = bp->b_target->bt_mount;
600 	xfs_failaddr_t	fa;
601 
602 	/*
603 	 * There is no verification of non-crc AGFLs because mkfs does not
604 	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
605 	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
606 	 * can't verify just those entries are valid.
607 	 */
608 	if (!xfs_sb_version_hascrc(&mp->m_sb))
609 		return;
610 
611 	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
612 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
613 	else {
614 		fa = xfs_agfl_verify(bp);
615 		if (fa)
616 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
617 	}
618 }
619 
620 static void
621 xfs_agfl_write_verify(
622 	struct xfs_buf	*bp)
623 {
624 	struct xfs_mount	*mp = bp->b_target->bt_mount;
625 	struct xfs_buf_log_item	*bip = bp->b_log_item;
626 	xfs_failaddr_t		fa;
627 
628 	/* no verification of non-crc AGFLs */
629 	if (!xfs_sb_version_hascrc(&mp->m_sb))
630 		return;
631 
632 	fa = xfs_agfl_verify(bp);
633 	if (fa) {
634 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
635 		return;
636 	}
637 
638 	if (bip)
639 		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
640 
641 	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
642 }
643 
644 const struct xfs_buf_ops xfs_agfl_buf_ops = {
645 	.name = "xfs_agfl",
646 	.verify_read = xfs_agfl_read_verify,
647 	.verify_write = xfs_agfl_write_verify,
648 	.verify_struct = xfs_agfl_verify,
649 };
650 
651 /*
652  * Read in the allocation group free block array.
653  */
654 int					/* error */
655 xfs_alloc_read_agfl(
656 	xfs_mount_t	*mp,		/* mount point structure */
657 	xfs_trans_t	*tp,		/* transaction pointer */
658 	xfs_agnumber_t	agno,		/* allocation group number */
659 	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
660 {
661 	xfs_buf_t	*bp;		/* return value */
662 	int		error;
663 
664 	ASSERT(agno != NULLAGNUMBER);
665 	error = xfs_trans_read_buf(
666 			mp, tp, mp->m_ddev_targp,
667 			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
668 			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
669 	if (error)
670 		return error;
671 	xfs_buf_set_ref(bp, XFS_AGFL_REF);
672 	*bpp = bp;
673 	return 0;
674 }
675 
676 STATIC int
677 xfs_alloc_update_counters(
678 	struct xfs_trans	*tp,
679 	struct xfs_perag	*pag,
680 	struct xfs_buf		*agbp,
681 	long			len)
682 {
683 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
684 
685 	pag->pagf_freeblks += len;
686 	be32_add_cpu(&agf->agf_freeblks, len);
687 
688 	xfs_trans_agblocks_delta(tp, len);
689 	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
690 		     be32_to_cpu(agf->agf_length)))
691 		return -EFSCORRUPTED;
692 
693 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
694 	return 0;
695 }
696 
697 /*
698  * Allocation group level functions.
699  */
700 
701 /*
702  * Allocate a variable extent in the allocation group agno.
703  * Type and bno are used to determine where in the allocation group the
704  * extent will start.
705  * Extent's length (returned in *len) will be between minlen and maxlen,
706  * and of the form k * prod + mod unless there's nothing that large.
707  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
708  */
709 STATIC int			/* error */
710 xfs_alloc_ag_vextent(
711 	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
712 {
713 	int		error=0;
714 
715 	ASSERT(args->minlen > 0);
716 	ASSERT(args->maxlen > 0);
717 	ASSERT(args->minlen <= args->maxlen);
718 	ASSERT(args->mod < args->prod);
719 	ASSERT(args->alignment > 0);
720 
721 	/*
722 	 * Branch to correct routine based on the type.
723 	 */
724 	args->wasfromfl = 0;
725 	switch (args->type) {
726 	case XFS_ALLOCTYPE_THIS_AG:
727 		error = xfs_alloc_ag_vextent_size(args);
728 		break;
729 	case XFS_ALLOCTYPE_NEAR_BNO:
730 		error = xfs_alloc_ag_vextent_near(args);
731 		break;
732 	case XFS_ALLOCTYPE_THIS_BNO:
733 		error = xfs_alloc_ag_vextent_exact(args);
734 		break;
735 	default:
736 		ASSERT(0);
737 		/* NOTREACHED */
738 	}
739 
740 	if (error || args->agbno == NULLAGBLOCK)
741 		return error;
742 
743 	ASSERT(args->len >= args->minlen);
744 	ASSERT(args->len <= args->maxlen);
745 	ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
746 	ASSERT(args->agbno % args->alignment == 0);
747 
748 	/* if not file data, insert new block into the reverse map btree */
749 	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
750 		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
751 				       args->agbno, args->len, &args->oinfo);
752 		if (error)
753 			return error;
754 	}
755 
756 	if (!args->wasfromfl) {
757 		error = xfs_alloc_update_counters(args->tp, args->pag,
758 						  args->agbp,
759 						  -((long)(args->len)));
760 		if (error)
761 			return error;
762 
763 		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
764 					      args->agbno, args->len));
765 	}
766 
767 	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
768 
769 	XFS_STATS_INC(args->mp, xs_allocx);
770 	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
771 	return error;
772 }
773 
774 /*
775  * Allocate a variable extent at exactly agno/bno.
776  * Extent's length (returned in *len) will be between minlen and maxlen,
777  * and of the form k * prod + mod unless there's nothing that large.
778  * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
779  */
780 STATIC int			/* error */
781 xfs_alloc_ag_vextent_exact(
782 	xfs_alloc_arg_t	*args)	/* allocation argument structure */
783 {
784 	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
785 	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
786 	int		error;
787 	xfs_agblock_t	fbno;	/* start block of found extent */
788 	xfs_extlen_t	flen;	/* length of found extent */
789 	xfs_agblock_t	tbno;	/* start block of busy extent */
790 	xfs_extlen_t	tlen;	/* length of busy extent */
791 	xfs_agblock_t	tend;	/* end block of busy extent */
792 	int		i;	/* success/failure of operation */
793 	unsigned	busy_gen;
794 
795 	ASSERT(args->alignment == 1);
796 
797 	/*
798 	 * Allocate/initialize a cursor for the by-number freespace btree.
799 	 */
800 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
801 					  args->agno, XFS_BTNUM_BNO);
802 
803 	/*
804 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
805 	 * Look for the closest free block <= bno, it must contain bno
806 	 * if any free block does.
807 	 */
808 	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
809 	if (error)
810 		goto error0;
811 	if (!i)
812 		goto not_found;
813 
814 	/*
815 	 * Grab the freespace record.
816 	 */
817 	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
818 	if (error)
819 		goto error0;
820 	XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
821 	ASSERT(fbno <= args->agbno);
822 
823 	/*
824 	 * Check for overlapping busy extents.
825 	 */
826 	tbno = fbno;
827 	tlen = flen;
828 	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
829 
830 	/*
831 	 * Give up if the start of the extent is busy, or the freespace isn't
832 	 * long enough for the minimum request.
833 	 */
834 	if (tbno > args->agbno)
835 		goto not_found;
836 	if (tlen < args->minlen)
837 		goto not_found;
838 	tend = tbno + tlen;
839 	if (tend < args->agbno + args->minlen)
840 		goto not_found;
841 
842 	/*
843 	 * End of extent will be smaller of the freespace end and the
844 	 * maximal requested end.
845 	 *
846 	 * Fix the length according to mod and prod if given.
847 	 */
848 	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
849 						- args->agbno;
850 	xfs_alloc_fix_len(args);
851 	ASSERT(args->agbno + args->len <= tend);
852 
853 	/*
854 	 * We are allocating agbno for args->len
855 	 * Allocate/initialize a cursor for the by-size btree.
856 	 */
857 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
858 		args->agno, XFS_BTNUM_CNT);
859 	ASSERT(args->agbno + args->len <=
860 		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
861 	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
862 				      args->len, XFSA_FIXUP_BNO_OK);
863 	if (error) {
864 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
865 		goto error0;
866 	}
867 
868 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
869 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
870 
871 	args->wasfromfl = 0;
872 	trace_xfs_alloc_exact_done(args);
873 	return 0;
874 
875 not_found:
876 	/* Didn't find it, return null. */
877 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
878 	args->agbno = NULLAGBLOCK;
879 	trace_xfs_alloc_exact_notfound(args);
880 	return 0;
881 
882 error0:
883 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
884 	trace_xfs_alloc_exact_error(args);
885 	return error;
886 }
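/*
 * Worked example (hypothetical numbers): asking for agbno 50 with minlen 4
 * and maxlen 10, the LE lookup might find free extent [48, 16], ending at
 * block 64. With nothing busy, tbno == 48 <= 50 and tend == 64 >= 50 + 4,
 * so the allocation succeeds with len == min(64, 50 + 10) - 50 == 10. Had
 * blocks 49-52 been busy, busy trimming would keep the larger unbusy piece
 * starting at 53, moving the usable start past 50, and the search would
 * bail out through not_found.
 */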
887 
888 /*
889  * Search the btree in a given direction via the search cursor and compare
890  * the records found against the good extent we've already found.
891  */
892 STATIC int
893 xfs_alloc_find_best_extent(
894 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
895 	struct xfs_btree_cur	**gcur,	/* good cursor */
896 	struct xfs_btree_cur	**scur,	/* searching cursor */
897 	xfs_agblock_t		gdiff,	/* difference for search comparison */
898 	xfs_agblock_t		*sbno,	/* extent found by search */
899 	xfs_extlen_t		*slen,	/* extent length */
900 	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
901 	xfs_extlen_t		*slena,	/* aligned extent length */
902 	int			dir)	/* 0 = search right, 1 = search left */
903 {
904 	xfs_agblock_t		new;
905 	xfs_agblock_t		sdiff;
906 	int			error;
907 	int			i;
908 	unsigned		busy_gen;
909 
910 	/* The good extent is perfect, no need to search. */
911 	if (!gdiff)
912 		goto out_use_good;
913 
914 	/*
915 	 * Look until we find a better one, run out of space or run off the end.
916 	 */
917 	do {
918 		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
919 		if (error)
920 			goto error0;
921 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
922 		xfs_alloc_compute_aligned(args, *sbno, *slen,
923 				sbnoa, slena, &busy_gen);
924 
925 		/*
926 		 * The good extent is closer than this one.
927 		 */
928 		if (!dir) {
929 			if (*sbnoa > args->max_agbno)
930 				goto out_use_good;
931 			if (*sbnoa >= args->agbno + gdiff)
932 				goto out_use_good;
933 		} else {
934 			if (*sbnoa < args->min_agbno)
935 				goto out_use_good;
936 			if (*sbnoa <= args->agbno - gdiff)
937 				goto out_use_good;
938 		}
939 
940 		/*
941 		 * Same distance, compare length and pick the best.
942 		 */
943 		if (*slena >= args->minlen) {
944 			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
945 			xfs_alloc_fix_len(args);
946 
947 			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
948 						       args->alignment,
949 						       args->datatype, *sbnoa,
950 						       *slena, &new);
951 
952 			/*
953 			 * Choose closer size and invalidate other cursor.
954 			 */
955 			if (sdiff < gdiff)
956 				goto out_use_search;
957 			goto out_use_good;
958 		}
959 
960 		if (!dir)
961 			error = xfs_btree_increment(*scur, 0, &i);
962 		else
963 			error = xfs_btree_decrement(*scur, 0, &i);
964 		if (error)
965 			goto error0;
966 	} while (i);
967 
968 out_use_good:
969 	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
970 	*scur = NULL;
971 	return 0;
972 
973 out_use_search:
974 	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
975 	*gcur = NULL;
976 	return 0;
977 
978 error0:
979 	/* caller invalidates cursors */
980 	return error;
981 }
982 
983 /*
984  * Allocate a variable extent near bno in the allocation group agno.
985  * Extent's length (returned in len) will be between minlen and maxlen,
986  * and of the form k * prod + mod unless there's nothing that large.
987  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
988  */
989 STATIC int				/* error */
990 xfs_alloc_ag_vextent_near(
991 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
992 {
993 	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
994 	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
995 	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
996 	xfs_agblock_t	gtbno;		/* start bno of right side entry */
997 	xfs_agblock_t	gtbnoa;		/* aligned ... */
998 	xfs_extlen_t	gtdiff;		/* difference to right side entry */
999 	xfs_extlen_t	gtlen;		/* length of right side entry */
1000 	xfs_extlen_t	gtlena;		/* aligned ... */
1001 	xfs_agblock_t	gtnew;		/* useful start bno of right side */
1002 	int		error;		/* error code */
1003 	int		i;		/* result code, temporary */
1004 	int		j;		/* result code, temporary */
1005 	xfs_agblock_t	ltbno;		/* start bno of left side entry */
1006 	xfs_agblock_t	ltbnoa;		/* aligned ... */
1007 	xfs_extlen_t	ltdiff;		/* difference to left side entry */
1008 	xfs_extlen_t	ltlen;		/* length of left side entry */
1009 	xfs_extlen_t	ltlena;		/* aligned ... */
1010 	xfs_agblock_t	ltnew;		/* useful start bno of left side */
1011 	xfs_extlen_t	rlen;		/* length of returned extent */
1012 	bool		busy;
1013 	unsigned	busy_gen;
1014 #ifdef DEBUG
1015 	/*
1016 	 * Randomly don't execute the first algorithm.
1017 	 */
1018 	int		dofirst;	/* set to do first algorithm */
1019 
1020 	dofirst = prandom_u32() & 1;
1021 #endif
1022 
1023 	/* handle an uninitialized agbno range so the caller doesn't have to */
1024 	if (!args->min_agbno && !args->max_agbno)
1025 		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1026 	ASSERT(args->min_agbno <= args->max_agbno);
1027 
1028 	/* clamp agbno to the range if it's outside */
1029 	if (args->agbno < args->min_agbno)
1030 		args->agbno = args->min_agbno;
1031 	if (args->agbno > args->max_agbno)
1032 		args->agbno = args->max_agbno;
1033 
1034 restart:
1035 	bno_cur_lt = NULL;
1036 	bno_cur_gt = NULL;
1037 	ltlen = 0;
1038 	gtlena = 0;
1039 	ltlena = 0;
1040 	busy = false;
1041 
1042 	/*
1043 	 * Get a cursor for the by-size btree.
1044 	 */
1045 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1046 		args->agno, XFS_BTNUM_CNT);
1047 
1048 	/*
1049 	 * See if there are any free extents as big as maxlen.
1050 	 */
1051 	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
1052 		goto error0;
1053 	/*
1054 	 * If none, then pick up the last entry in the tree unless the
1055 	 * tree is empty.
1056 	 */
1057 	if (!i) {
1058 		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
1059 				&ltlen, &i)))
1060 			goto error0;
1061 		if (i == 0 || ltlen == 0) {
1062 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1063 			trace_xfs_alloc_near_noentry(args);
1064 			return 0;
1065 		}
1066 		ASSERT(i == 1);
1067 	}
1068 	args->wasfromfl = 0;
1069 
1070 	/*
1071 	 * First algorithm.
1072 	 * If the requested extent is large wrt the freespaces available
1073 	 * in this a.g., then the cursor will be pointing to a btree entry
1074 	 * near the right edge of the tree.  If it's in the last btree leaf
1075 	 * block, then we just examine all the entries in that block
1076 	 * that are big enough, and pick the best one.
1077 	 * This is written as a while loop so we can break out of it,
1078 	 * but we never loop back to the top.
1079 	 */
1080 	while (xfs_btree_islastblock(cnt_cur, 0)) {
1081 		xfs_extlen_t	bdiff;
1082 		int		besti=0;
1083 		xfs_extlen_t	blen=0;
1084 		xfs_agblock_t	bnew=0;
1085 
1086 #ifdef DEBUG
1087 		if (dofirst)
1088 			break;
1089 #endif
1090 		/*
1091 		 * Start from the entry that lookup found, sequence through
1092 		 * all larger free blocks.  If we're actually pointing at a
1093 		 * record smaller than maxlen, go to the start of this block,
1094 		 * and skip all those smaller than minlen.
1095 		 */
1096 		if (ltlen || args->alignment > 1) {
1097 			cnt_cur->bc_ptrs[0] = 1;
1098 			do {
1099 				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
1100 						&ltlen, &i)))
1101 					goto error0;
1102 				XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1103 				if (ltlen >= args->minlen)
1104 					break;
1105 				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
1106 					goto error0;
1107 			} while (i);
1108 			ASSERT(ltlen >= args->minlen);
1109 			if (!i)
1110 				break;
1111 		}
1112 		i = cnt_cur->bc_ptrs[0];
1113 		for (j = 1, blen = 0, bdiff = 0;
1114 		     !error && j && (blen < args->maxlen || bdiff > 0);
1115 		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
1116 			/*
1117 			 * For each entry, decide if it's better than
1118 			 * the previous best entry.
1119 			 */
1120 			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
1121 				goto error0;
1122 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1123 			busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
1124 					&ltbnoa, &ltlena, &busy_gen);
1125 			if (ltlena < args->minlen)
1126 				continue;
1127 			if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
1128 				continue;
1129 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1130 			xfs_alloc_fix_len(args);
1131 			ASSERT(args->len >= args->minlen);
1132 			if (args->len < blen)
1133 				continue;
1134 			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1135 				args->alignment, args->datatype, ltbnoa,
1136 				ltlena, &ltnew);
1137 			if (ltnew != NULLAGBLOCK &&
1138 			    (args->len > blen || ltdiff < bdiff)) {
1139 				bdiff = ltdiff;
1140 				bnew = ltnew;
1141 				blen = args->len;
1142 				besti = cnt_cur->bc_ptrs[0];
1143 			}
1144 		}
1145 		/*
1146 		 * It didn't work.  We COULD be in a case where
1147 		 * there's a good record somewhere, so try again.
1148 		 */
1149 		if (blen == 0)
1150 			break;
1151 		/*
1152 		 * Point at the best entry, and retrieve it again.
1153 		 */
1154 		cnt_cur->bc_ptrs[0] = besti;
1155 		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
1156 			goto error0;
1157 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1158 		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1159 		args->len = blen;
1160 
1161 		/*
1162 		 * We are allocating starting at bnew for blen blocks.
1163 		 */
1164 		args->agbno = bnew;
1165 		ASSERT(bnew >= ltbno);
1166 		ASSERT(bnew + blen <= ltbno + ltlen);
1167 		/*
1168 		 * Set up a cursor for the by-bno tree.
1169 		 */
1170 		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
1171 			args->agbp, args->agno, XFS_BTNUM_BNO);
1172 		/*
1173 		 * Fix up the btree entries.
1174 		 */
1175 		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
1176 				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
1177 			goto error0;
1178 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1179 		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1180 
1181 		trace_xfs_alloc_near_first(args);
1182 		return 0;
1183 	}
1184 	/*
1185 	 * Second algorithm.
1186 	 * Search in the by-bno tree to the left and to the right
1187 	 * simultaneously, until in each case we find a space big enough,
1188 	 * or run into the edge of the tree.  When we run into the edge,
1189 	 * we deallocate that cursor.
1190 	 * If both searches succeed, we compare the two spaces and pick
1191 	 * the better one.
1192 	 * With alignment, it's possible for both to fail; the upper
1193 	 * level algorithm that picks allocation groups for allocations
1194 	 * is not supposed to do this.
1195 	 */
1196 	/*
1197 	 * Allocate and initialize the cursor for the leftward search.
1198 	 */
1199 	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1200 		args->agno, XFS_BTNUM_BNO);
1201 	/*
1202 	 * Lookup <= bno to find the leftward search's starting point.
1203 	 */
1204 	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
1205 		goto error0;
1206 	if (!i) {
1207 		/*
1208 		 * Didn't find anything; use this cursor for the rightward
1209 		 * search.
1210 		 */
1211 		bno_cur_gt = bno_cur_lt;
1212 		bno_cur_lt = NULL;
1213 	}
1214 	/*
1215 	 * Found something.  Duplicate the cursor for the rightward search.
1216 	 */
1217 	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
1218 		goto error0;
1219 	/*
1220 	 * Increment the cursor, so we will point at the entry just right
1221 	 * of the leftward entry if any, or to the leftmost entry.
1222 	 */
1223 	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1224 		goto error0;
1225 	if (!i) {
1226 		/*
1227 		 * It failed, there are no rightward entries.
1228 		 */
1229 		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
1230 		bno_cur_gt = NULL;
1231 	}
1232 	/*
1233 	 * Loop going left with the leftward cursor, right with the
1234 	 * rightward cursor, until either both directions give up or
1235 	 * we find an entry at least as big as minlen.
1236 	 */
1237 	do {
1238 		if (bno_cur_lt) {
1239 			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
1240 				goto error0;
1241 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1242 			busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
1243 					&ltbnoa, &ltlena, &busy_gen);
1244 			if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
1245 				break;
1246 			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
1247 				goto error0;
1248 			if (!i || ltbnoa < args->min_agbno) {
1249 				xfs_btree_del_cursor(bno_cur_lt,
1250 						     XFS_BTREE_NOERROR);
1251 				bno_cur_lt = NULL;
1252 			}
1253 		}
1254 		if (bno_cur_gt) {
1255 			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
1256 				goto error0;
1257 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1258 			busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
1259 					&gtbnoa, &gtlena, &busy_gen);
1260 			if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
1261 				break;
1262 			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1263 				goto error0;
1264 			if (!i || gtbnoa > args->max_agbno) {
1265 				xfs_btree_del_cursor(bno_cur_gt,
1266 						     XFS_BTREE_NOERROR);
1267 				bno_cur_gt = NULL;
1268 			}
1269 		}
1270 	} while (bno_cur_lt || bno_cur_gt);
1271 
1272 	/*
1273 	 * Got both cursors still active, need to find better entry.
1274 	 */
1275 	if (bno_cur_lt && bno_cur_gt) {
1276 		if (ltlena >= args->minlen) {
1277 			/*
1278 			 * Left side is good, look for a right side entry.
1279 			 */
1280 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1281 			xfs_alloc_fix_len(args);
1282 			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1283 				args->alignment, args->datatype, ltbnoa,
1284 				ltlena, &ltnew);
1285 
1286 			error = xfs_alloc_find_best_extent(args,
1287 						&bno_cur_lt, &bno_cur_gt,
1288 						ltdiff, &gtbno, &gtlen,
1289 						&gtbnoa, &gtlena,
1290 						0 /* search right */);
1291 		} else {
1292 			ASSERT(gtlena >= args->minlen);
1293 
1294 			/*
1295 			 * Right side is good, look for a left side entry.
1296 			 */
1297 			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1298 			xfs_alloc_fix_len(args);
1299 			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1300 				args->alignment, args->datatype, gtbnoa,
1301 				gtlena, &gtnew);
1302 
1303 			error = xfs_alloc_find_best_extent(args,
1304 						&bno_cur_gt, &bno_cur_lt,
1305 						gtdiff, &ltbno, &ltlen,
1306 						&ltbnoa, &ltlena,
1307 						1 /* search left */);
1308 		}
1309 
1310 		if (error)
1311 			goto error0;
1312 	}
1313 
1314 	/*
1315 	 * If we couldn't get anything, give up.
1316 	 */
1317 	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1318 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1319 
1320 		if (busy) {
1321 			trace_xfs_alloc_near_busy(args);
1322 			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
1323 			goto restart;
1324 		}
1325 		trace_xfs_alloc_size_neither(args);
1326 		args->agbno = NULLAGBLOCK;
1327 		return 0;
1328 	}
1329 
1330 	/*
1331 	 * At this point we have selected a freespace entry, either to the
1332 	 * left or to the right.  If it's on the right, copy all the
1333 	 * useful variables to the "left" set so we only have one
1334 	 * copy of this code.
1335 	 */
1336 	if (bno_cur_gt) {
1337 		bno_cur_lt = bno_cur_gt;
1338 		bno_cur_gt = NULL;
1339 		ltbno = gtbno;
1340 		ltbnoa = gtbnoa;
1341 		ltlen = gtlen;
1342 		ltlena = gtlena;
1343 		j = 1;
1344 	} else
1345 		j = 0;
1346 
1347 	/*
1348 	 * Fix up the length and compute the useful address.
1349 	 */
1350 	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1351 	xfs_alloc_fix_len(args);
1352 	rlen = args->len;
1353 	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
1354 				     args->datatype, ltbnoa, ltlena, &ltnew);
1355 	ASSERT(ltnew >= ltbno);
1356 	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
1357 	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1358 	ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
1359 	args->agbno = ltnew;
1360 
1361 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1362 			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1363 		goto error0;
1364 
1365 	if (j)
1366 		trace_xfs_alloc_near_greater(args);
1367 	else
1368 		trace_xfs_alloc_near_lesser(args);
1369 
1370 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1371 	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1372 	return 0;
1373 
1374  error0:
1375 	trace_xfs_alloc_near_error(args);
1376 	if (cnt_cur != NULL)
1377 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1378 	if (bno_cur_lt != NULL)
1379 		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1380 	if (bno_cur_gt != NULL)
1381 		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1382 	return error;
1383 }
1384 
1385 /*
1386  * Allocate a variable extent anywhere in the allocation group agno.
1387  * Extent's length (returned in len) will be between minlen and maxlen,
1388  * and of the form k * prod + mod unless there's nothing that large.
1389  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1390  */
1391 STATIC int				/* error */
1392 xfs_alloc_ag_vextent_size(
1393 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
1394 {
1395 	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
1396 	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
1397 	int		error;		/* error result */
1398 	xfs_agblock_t	fbno;		/* start of found freespace */
1399 	xfs_extlen_t	flen;		/* length of found freespace */
1400 	int		i;		/* temp status variable */
1401 	xfs_agblock_t	rbno;		/* returned block number */
1402 	xfs_extlen_t	rlen;		/* length of returned extent */
1403 	bool		busy;
1404 	unsigned	busy_gen;
1405 
1406 restart:
1407 	/*
1408 	 * Allocate and initialize a cursor for the by-size btree.
1409 	 */
1410 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1411 		args->agno, XFS_BTNUM_CNT);
1412 	bno_cur = NULL;
1413 	busy = false;
1414 
1415 	/*
1416 	 * Look for an entry >= maxlen+alignment-1 blocks.
1417 	 */
1418 	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1419 			args->maxlen + args->alignment - 1, &i)))
1420 		goto error0;
1421 
1422 	/*
1423 	 * If none then we have to settle for a smaller extent. In the case that
1424 	 * there are no large extents, this will return the last entry in the
1425 	 * tree unless the tree is empty. In the case that there are only busy
1426 	 * large extents, this will return the largest small extent unless there
1427 	 * are no smaller extents available.
1428 	 */
1429 	if (!i) {
1430 		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1431 						   &fbno, &flen, &i);
1432 		if (error)
1433 			goto error0;
1434 		if (i == 0 || flen == 0) {
1435 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1436 			trace_xfs_alloc_size_noentry(args);
1437 			return 0;
1438 		}
1439 		ASSERT(i == 1);
1440 		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1441 				&rlen, &busy_gen);
1442 	} else {
1443 		/*
1444 		 * Search for a non-busy extent that is large enough.
1445 		 */
1446 		for (;;) {
1447 			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1448 			if (error)
1449 				goto error0;
1450 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1451 
1452 			busy = xfs_alloc_compute_aligned(args, fbno, flen,
1453 					&rbno, &rlen, &busy_gen);
1454 
1455 			if (rlen >= args->maxlen)
1456 				break;
1457 
1458 			error = xfs_btree_increment(cnt_cur, 0, &i);
1459 			if (error)
1460 				goto error0;
1461 			if (i == 0) {
1462 				/*
1463 				 * Our only valid extents must have been busy.
1464 				 * Make it unbusy by forcing the log out and
1465 				 * retrying.
1466 				 */
1467 				xfs_btree_del_cursor(cnt_cur,
1468 						     XFS_BTREE_NOERROR);
1469 				trace_xfs_alloc_size_busy(args);
1470 				xfs_extent_busy_flush(args->mp,
1471 							args->pag, busy_gen);
1472 				goto restart;
1473 			}
1474 		}
1475 	}
1476 
1477 	/*
1478 	 * In the first case above, we got the last entry in the
1479 	 * by-size btree.  Now we check to see if the space hits maxlen
1480 	 * once aligned; if not, we search left for something better.
1481 	 * This can't happen in the second case above.
1482 	 */
1483 	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1484 	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
1485 			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
1486 	if (rlen < args->maxlen) {
1487 		xfs_agblock_t	bestfbno;
1488 		xfs_extlen_t	bestflen;
1489 		xfs_agblock_t	bestrbno;
1490 		xfs_extlen_t	bestrlen;
1491 
1492 		bestrlen = rlen;
1493 		bestrbno = rbno;
1494 		bestflen = flen;
1495 		bestfbno = fbno;
1496 		for (;;) {
1497 			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1498 				goto error0;
1499 			if (i == 0)
1500 				break;
1501 			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1502 					&i)))
1503 				goto error0;
1504 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1505 			if (flen < bestrlen)
1506 				break;
1507 			busy = xfs_alloc_compute_aligned(args, fbno, flen,
1508 					&rbno, &rlen, &busy_gen);
1509 			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1510 			XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
1511 				(rlen <= flen && rbno + rlen <= fbno + flen),
1512 				error0);
1513 			if (rlen > bestrlen) {
1514 				bestrlen = rlen;
1515 				bestrbno = rbno;
1516 				bestflen = flen;
1517 				bestfbno = fbno;
1518 				if (rlen == args->maxlen)
1519 					break;
1520 			}
1521 		}
1522 		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1523 				&i)))
1524 			goto error0;
1525 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1526 		rlen = bestrlen;
1527 		rbno = bestrbno;
1528 		flen = bestflen;
1529 		fbno = bestfbno;
1530 	}
1531 	args->wasfromfl = 0;
1532 	/*
1533 	 * Fix up the length.
1534 	 */
1535 	args->len = rlen;
1536 	if (rlen < args->minlen) {
1537 		if (busy) {
1538 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1539 			trace_xfs_alloc_size_busy(args);
1540 			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
1541 			goto restart;
1542 		}
1543 		goto out_nominleft;
1544 	}
1545 	xfs_alloc_fix_len(args);
1546 
1547 	rlen = args->len;
1548 	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
1549 	/*
1550 	 * Allocate and initialize a cursor for the by-block tree.
1551 	 */
1552 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1553 		args->agno, XFS_BTNUM_BNO);
1554 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1555 			rbno, rlen, XFSA_FIXUP_CNT_OK)))
1556 		goto error0;
1557 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1558 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1559 	cnt_cur = bno_cur = NULL;
1560 	args->len = rlen;
1561 	args->agbno = rbno;
1562 	XFS_WANT_CORRUPTED_GOTO(args->mp,
1563 		args->agbno + args->len <=
1564 			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1565 		error0);
1566 	trace_xfs_alloc_size_done(args);
1567 	return 0;
1568 
1569 error0:
1570 	trace_xfs_alloc_size_error(args);
1571 	if (cnt_cur)
1572 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1573 	if (bno_cur)
1574 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1575 	return error;
1576 
1577 out_nominleft:
1578 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1579 	trace_xfs_alloc_size_nominleft(args);
1580 	args->agbno = NULLAGBLOCK;
1581 	return 0;
1582 }
1583 
1584 /*
1585  * Deal with the case where only small freespaces remain.
1586  * Either return the contents of the last freespace record,
1587  * or allocate space from the freelist if there is nothing in the tree.
1588  */
1589 STATIC int			/* error */
1590 xfs_alloc_ag_vextent_small(
1591 	xfs_alloc_arg_t	*args,	/* allocation argument structure */
1592 	xfs_btree_cur_t	*ccur,	/* by-size cursor */
1593 	xfs_agblock_t	*fbnop,	/* result block number */
1594 	xfs_extlen_t	*flenp,	/* result length */
1595 	int		*stat)	/* status: 0-freelist, 1-normal/none */
1596 {
1597 	struct xfs_owner_info	oinfo;
1598 	int		error;
1599 	xfs_agblock_t	fbno;
1600 	xfs_extlen_t	flen;
1601 	int		i;
1602 
1603 	if ((error = xfs_btree_decrement(ccur, 0, &i)))
1604 		goto error0;
1605 	if (i) {
1606 		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
1607 			goto error0;
1608 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1609 	}
1610 	/*
1611 	 * Nothing in the btree, try the freelist.  Make sure
1612 	 * to respect minleft even when pulling from the
1613 	 * freelist.
1614 	 */
1615 	else if (args->minlen == 1 && args->alignment == 1 &&
1616 		 args->resv != XFS_AG_RESV_AGFL &&
1617 		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
1618 		  > args->minleft)) {
1619 		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1620 		if (error)
1621 			goto error0;
1622 		if (fbno != NULLAGBLOCK) {
1623 			xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
1624 			      xfs_alloc_allow_busy_reuse(args->datatype));
1625 
1626 			if (xfs_alloc_is_userdata(args->datatype)) {
1627 				xfs_buf_t	*bp;
1628 
1629 				bp = xfs_btree_get_bufs(args->mp, args->tp,
1630 					args->agno, fbno, 0);
1631 				if (!bp) {
1632 					error = -EFSCORRUPTED;
1633 					goto error0;
1634 				}
1635 				xfs_trans_binval(args->tp, bp);
1636 			}
1637 			args->len = 1;
1638 			args->agbno = fbno;
1639 			XFS_WANT_CORRUPTED_GOTO(args->mp,
1640 				args->agbno + args->len <=
1641 				be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1642 				error0);
1643 			args->wasfromfl = 1;
1644 			trace_xfs_alloc_small_freelist(args);
1645 
1646 			/*
1647 			 * If we're feeding an AGFL block to something that
1648 			 * doesn't live in the free space, we need to clear
1649 			 * out the OWN_AG rmap.
1650 			 */
1651 			xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
1652 			error = xfs_rmap_free(args->tp, args->agbp, args->agno,
1653 					fbno, 1, &oinfo);
1654 			if (error)
1655 				goto error0;
1656 
1657 			*stat = 0;
1658 			return 0;
1659 		}
1660 		/*
1661 		 * Nothing in the freelist.
1662 		 */
1663 		else
1664 			flen = 0;
1665 	}
1666 	/*
1667 	 * Can't allocate from the freelist for some reason.
1668 	 */
1669 	else {
1670 		fbno = NULLAGBLOCK;
1671 		flen = 0;
1672 	}
1673 	/*
1674 	 * Can't do the allocation, give up.
1675 	 */
1676 	if (flen < args->minlen) {
1677 		args->agbno = NULLAGBLOCK;
1678 		trace_xfs_alloc_small_notenough(args);
1679 		flen = 0;
1680 	}
1681 	*fbnop = fbno;
1682 	*flenp = flen;
1683 	*stat = 1;
1684 	trace_xfs_alloc_small_done(args);
1685 	return 0;
1686 
1687 error0:
1688 	trace_xfs_alloc_small_error(args);
1689 	return error;
1690 }
1691 
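/*
 * Illustrative sketch, compiled out: how a caller interprets the three-way
 * result of xfs_alloc_ag_vextent_small().  On *stat == 0 the request was
 * satisfied from the AGFL and args->agbno/args->len are already set; on
 * *stat == 1 the caller inspects fbno/flen, where flen == 0 means nothing
 * usable remained.  The surrounding cursor, error variable and label setup
 * is assumed from the callers in this file.
 */
#if 0
	xfs_agblock_t	fbno;
	xfs_extlen_t	flen;
	int		stat;

	error = xfs_alloc_ag_vextent_small(args, cnt_cur, &fbno, &flen, &stat);
	if (error)
		goto error0;
	if (stat == 0)
		return 0;	/* AGFL block; allocation is complete */
	if (flen == 0)
		args->agbno = NULLAGBLOCK;	/* nothing usable remained */
#endif
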
1692 /*
1693  * Free the extent starting at agno/bno for length.
1694  */
1695 STATIC int
1696 xfs_free_ag_extent(
1697 	xfs_trans_t		*tp,
1698 	xfs_buf_t		*agbp,
1699 	xfs_agnumber_t		agno,
1700 	xfs_agblock_t		bno,
1701 	xfs_extlen_t		len,
1702 	struct xfs_owner_info	*oinfo,
1703 	enum xfs_ag_resv_type	type)
1704 {
1705 	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
1706 	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
1707 	int		error;		/* error return value */
1708 	xfs_agblock_t	gtbno;		/* start of right neighbor block */
1709 	xfs_extlen_t	gtlen;		/* length of right neighbor block */
1710 	int		haveleft;	/* have a left neighbor block */
1711 	int		haveright;	/* have a right neighbor block */
1712 	int		i;		/* temp, result code */
1713 	xfs_agblock_t	ltbno;		/* start of left neighbor block */
1714 	xfs_extlen_t	ltlen;		/* length of left neighbor block */
1715 	xfs_mount_t	*mp;		/* mount point struct for filesystem */
1716 	xfs_agblock_t	nbno;		/* new starting block of freespace */
1717 	xfs_extlen_t	nlen;		/* new length of freespace */
1718 	xfs_perag_t	*pag;		/* per allocation group data */
1719 
1720 	bno_cur = cnt_cur = NULL;
1721 	mp = tp->t_mountp;
1722 
1723 	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1724 		error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1725 		if (error)
1726 			goto error0;
1727 	}
1728 
1729 	/*
1730 	 * Allocate and initialize a cursor for the by-block btree.
1731 	 */
1732 	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1733 	/*
1734 	 * Look for a neighboring block on the left (lower block numbers)
1735 	 * that is contiguous with this space.
1736 	 */
1737 	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1738 		goto error0;
1739 	if (haveleft) {
1740 		/*
1741 		 * There is a block to our left.
1742 		 */
1743 		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1744 			goto error0;
1745 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1746 		/*
1747 		 * It's not contiguous, though.
1748 		 */
1749 		if (ltbno + ltlen < bno)
1750 			haveleft = 0;
1751 		else {
1752 			/*
1753 			 * If this failure happens the request to free this
1754 			 * space was invalid, it's (partly) already free.
1755 			 * Very bad.
1756 			 */
1757 			XFS_WANT_CORRUPTED_GOTO(mp,
1758 						ltbno + ltlen <= bno, error0);
1759 		}
1760 	}
1761 	/*
1762 	 * Look for a neighboring block on the right (higher block numbers)
1763 	 * that is contiguous with this space.
1764 	 */
1765 	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1766 		goto error0;
1767 	if (haveright) {
1768 		/*
1769 		 * There is a block to our right.
1770 		 */
1771 		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1772 			goto error0;
1773 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1774 		/*
1775 		 * It's not contiguous, though.
1776 		 */
1777 		if (bno + len < gtbno)
1778 			haveright = 0;
1779 		else {
1780 			/*
1781 			 * If this failure happens the request to free this
1782 			 * space was invalid, it's (partly) already free.
1783 			 * Very bad.
1784 			 */
1785 			XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
1786 		}
1787 	}
1788 	/*
1789 	 * Now allocate and initialize a cursor for the by-size tree.
1790 	 */
1791 	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1792 	/*
1793 	 * Have both left and right contiguous neighbors.
1794 	 * Merge all three into a single free block.
1795 	 */
1796 	if (haveleft && haveright) {
1797 		/*
1798 		 * Delete the old by-size entry on the left.
1799 		 */
1800 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1801 			goto error0;
1802 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1803 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1804 			goto error0;
1805 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1806 		/*
1807 		 * Delete the old by-size entry on the right.
1808 		 */
1809 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1810 			goto error0;
1811 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1812 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1813 			goto error0;
1814 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1815 		/*
1816 		 * Delete the old by-block entry for the right block.
1817 		 */
1818 		if ((error = xfs_btree_delete(bno_cur, &i)))
1819 			goto error0;
1820 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1821 		/*
1822 		 * Move the by-block cursor back to the left neighbor.
1823 		 */
1824 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1825 			goto error0;
1826 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1827 #ifdef DEBUG
1828 		/*
1829 		 * Check that this is the right record: delete didn't
1830 		 * mangle the cursor.
1831 		 */
1832 		{
1833 			xfs_agblock_t	xxbno;
1834 			xfs_extlen_t	xxlen;
1835 
1836 			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1837 					&i)))
1838 				goto error0;
1839 			XFS_WANT_CORRUPTED_GOTO(mp,
1840 				i == 1 && xxbno == ltbno && xxlen == ltlen,
1841 				error0);
1842 		}
1843 #endif
1844 		/*
1845 		 * Update remaining by-block entry to the new, joined block.
1846 		 */
1847 		nbno = ltbno;
1848 		nlen = len + ltlen + gtlen;
1849 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1850 			goto error0;
1851 	}
1852 	/*
1853 	 * Have only a left contiguous neighbor.
1854 	 * Merge it together with the new freespace.
1855 	 */
1856 	else if (haveleft) {
1857 		/*
1858 		 * Delete the old by-size entry on the left.
1859 		 */
1860 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1861 			goto error0;
1862 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1863 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1864 			goto error0;
1865 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1866 		/*
1867 		 * Back up the by-block cursor to the left neighbor, and
1868 		 * update its length.
1869 		 */
1870 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1871 			goto error0;
1872 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1873 		nbno = ltbno;
1874 		nlen = len + ltlen;
1875 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1876 			goto error0;
1877 	}
1878 	/*
1879 	 * Have only a right contiguous neighbor.
1880 	 * Merge it together with the new freespace.
1881 	 */
1882 	else if (haveright) {
1883 		/*
1884 		 * Delete the old by-size entry on the right.
1885 		 */
1886 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1887 			goto error0;
1888 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1889 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1890 			goto error0;
1891 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1892 		/*
1893 		 * Update the starting block and length of the right
1894 		 * neighbor in the by-block tree.
1895 		 */
1896 		nbno = bno;
1897 		nlen = len + gtlen;
1898 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1899 			goto error0;
1900 	}
1901 	/*
1902 	 * No contiguous neighbors.
1903 	 * Insert the new freespace into the by-block tree.
1904 	 */
1905 	else {
1906 		nbno = bno;
1907 		nlen = len;
1908 		if ((error = xfs_btree_insert(bno_cur, &i)))
1909 			goto error0;
1910 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1911 	}
1912 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1913 	bno_cur = NULL;
1914 	/*
1915 	 * In all cases we need to insert the new freespace in the by-size tree.
1916 	 */
1917 	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1918 		goto error0;
1919 	XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
1920 	if ((error = xfs_btree_insert(cnt_cur, &i)))
1921 		goto error0;
1922 	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1923 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1924 	cnt_cur = NULL;
1925 
1926 	/*
1927 	 * Update the freespace totals in the ag and superblock.
1928 	 */
1929 	pag = xfs_perag_get(mp, agno);
1930 	error = xfs_alloc_update_counters(tp, pag, agbp, len);
1931 	xfs_ag_resv_free_extent(pag, type, tp, len);
1932 	xfs_perag_put(pag);
1933 	if (error)
1934 		goto error0;
1935 
1936 	XFS_STATS_INC(mp, xs_freex);
1937 	XFS_STATS_ADD(mp, xs_freeb, len);
1938 
1939 	trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
1940 
1941 	return 0;
1942 
1943  error0:
1944 	trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
1945 	if (bno_cur)
1946 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1947 	if (cnt_cur)
1948 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1949 	return error;
1950 }
1951 
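/*
 * Illustrative sketch, compiled out: the two-sided merge above with
 * concrete (hypothetical) numbers.  Freeing [bno = 100, len = 10] with a
 * contiguous left neighbor [90, 10] and right neighbor [110, 5] removes
 * both neighbor records and leaves one freespace record [90, 25].
 */
#if 0
	xfs_agblock_t	ltbno = 90, bno = 100, gtbno = 110, nbno;
	xfs_extlen_t	ltlen = 10, len = 10, gtlen = 5, nlen;

	ASSERT(ltbno + ltlen == bno);	/* left edge touches the extent */
	ASSERT(bno + len == gtbno);	/* right edge touches the extent */
	nbno = ltbno;			/* 90 */
	nlen = len + ltlen + gtlen;	/* 25 */
#endif
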
1952 /*
1953  * Visible (exported) allocation/free functions.
1954  * Some of these are used just by xfs_alloc_btree.c and this file.
1955  */
1956 
1957 /*
1958  * Compute and fill in value of m_ag_maxlevels.
1959  */
1960 void
1961 xfs_alloc_compute_maxlevels(
1962 	xfs_mount_t	*mp)	/* file system mount structure */
1963 {
1964 	mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
1965 			(mp->m_sb.sb_agblocks + 1) / 2);
1966 }
1967 
1968 /*
1969  * Find the length of the longest extent in an AG.  The 'need' parameter
1970  * specifies how much space we're going to need for the AGFL and the
1971  * 'reserved' parameter tells us how many blocks in this AG are reserved for
1972  * other callers.
1973  */
1974 xfs_extlen_t
1975 xfs_alloc_longest_free_extent(
1976 	struct xfs_perag	*pag,
1977 	xfs_extlen_t		need,
1978 	xfs_extlen_t		reserved)
1979 {
1980 	xfs_extlen_t		delta = 0;
1981 
1982 	/*
1983 	 * If the AGFL needs a recharge, we'll have to subtract that from the
1984 	 * longest extent.
1985 	 */
1986 	if (need > pag->pagf_flcount)
1987 		delta = need - pag->pagf_flcount;
1988 
1989 	/*
1990 	 * If we cannot maintain others' reservations with space from the
1991 	 * not-longest freesp extents, we'll have to subtract /that/ from
1992 	 * the longest extent too.
1993 	 */
1994 	if (pag->pagf_freeblks - pag->pagf_longest < reserved)
1995 		delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
1996 
1997 	/*
1998 	 * If the longest extent is long enough to satisfy all the
1999 	 * reservations and AGFL rules in place, we can return this extent.
2000 	 */
2001 	if (pag->pagf_longest > delta)
2002 		return pag->pagf_longest - delta;
2003 
2004 	/* Otherwise, let the caller try for 1 block if there's space. */
2005 	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
2006 }
2007 
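/*
 * Illustrative sketch, compiled out: the delta arithmetic above with
 * hypothetical per-AG counters.  With pagf_longest = 100,
 * pagf_freeblks = 120, pagf_flcount = 3, need = 8 and reserved = 30:
 * recharging the AGFL costs 8 - 3 = 5 blocks, and the non-longest free
 * space (120 - 100 = 20) is 10 blocks short of the reservation, so
 * delta = 5 + 10 = 15 and the usable longest extent is 100 - 15 = 85.
 */
#if 0
	xfs_extlen_t	longest;

	longest = xfs_alloc_longest_free_extent(pag, 8, 30);
	/* with the counters above, longest == 85 */
#endif
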
2008 unsigned int
2009 xfs_alloc_min_freelist(
2010 	struct xfs_mount	*mp,
2011 	struct xfs_perag	*pag)
2012 {
2013 	unsigned int		min_free;
2014 
2015 	/* space needed by-bno freespace btree */
2016 	min_free = min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_BNOi] + 1,
2017 				       mp->m_ag_maxlevels);
2018 	/* space needed by-size freespace btree */
2019 	min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
2020 				       mp->m_ag_maxlevels);
2021 	/* space needed reverse mapping used space btree */
2022 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2023 		min_free += min_t(unsigned int,
2024 				  pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
2025 				  mp->m_rmap_maxlevels);
2026 
2027 	return min_free;
2028 }
2029 
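/*
 * Illustrative sketch, compiled out: with hypothetical btree heights of
 * 2 (bnobt), 2 (cntbt) and 4 (rmapbt), all below the per-mount maximums,
 * the minimum freelist length is (2 + 1) + (2 + 1) + (4 + 1) = 11 blocks;
 * without the rmapbt feature it would be 6.
 */
#if 0
	unsigned int	min_free;

	min_free = xfs_alloc_min_freelist(mp, pag);
	/* by design this is expected to fit within xfs_agfl_size(mp) */
#endif
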
2030 /*
2031  * Check if the operation we are fixing up the freelist for should go ahead or
2032  * not. If we are freeing blocks, we always allow it, otherwise the allocation
2033  * is dependent on whether the size and shape of free space available will
2034  * permit the requested allocation to take place.
2035  */
2036 static bool
2037 xfs_alloc_space_available(
2038 	struct xfs_alloc_arg	*args,
2039 	xfs_extlen_t		min_free,
2040 	int			flags)
2041 {
2042 	struct xfs_perag	*pag = args->pag;
2043 	xfs_extlen_t		alloc_len, longest;
2044 	xfs_extlen_t		reservation; /* blocks that are still reserved */
2045 	int			available;
2046 
2047 	if (flags & XFS_ALLOC_FLAG_FREEING)
2048 		return true;
2049 
2050 	reservation = xfs_ag_resv_needed(pag, args->resv);
2051 
2052 	/* do we have enough contiguous free space for the allocation? */
2053 	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2054 	longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2055 	if (longest < alloc_len)
2056 		return false;
2057 
2058 	/* do we have enough free space remaining for the allocation? */
2059 	available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
2060 			  reservation - min_free - args->minleft);
2061 	if (available < (int)max(args->total, alloc_len))
2062 		return false;
2063 
2064 	/*
2065 	 * Clamp maxlen to the amount of free space available for the actual
2066 	 * extent allocation.
2067 	 */
2068 	if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2069 		args->maxlen = available;
2070 		ASSERT(args->maxlen > 0);
2071 		ASSERT(args->maxlen >= args->minlen);
2072 	}
2073 
2074 	return true;
2075 }
2076 
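/*
 * Worked example for the contiguity check above, with hypothetical values:
 * a request with minlen = 16, alignment = 8 and minalignslop = 4 needs a
 * contiguous extent of at least 16 + (8 - 1) + 4 = 27 blocks, since up to
 * alignment - 1 blocks can be lost to rounding the start block up.
 */
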
2077 int
2078 xfs_free_agfl_block(
2079 	struct xfs_trans	*tp,
2080 	xfs_agnumber_t		agno,
2081 	xfs_agblock_t		agbno,
2082 	struct xfs_buf		*agbp,
2083 	struct xfs_owner_info	*oinfo)
2084 {
2085 	int			error;
2086 	struct xfs_buf		*bp;
2087 
2088 	error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2089 				   XFS_AG_RESV_AGFL);
2090 	if (error)
2091 		return error;
2092 
2093 	bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno, 0);
2094 	if (!bp)
2095 		return -EFSCORRUPTED;
2096 	xfs_trans_binval(tp, bp);
2097 
2098 	return 0;
2099 }
2100 
2101 /*
2102  * Check the agfl fields of the agf for inconsistency or corruption. The purpose
2103  * is to detect an agfl header padding mismatch between current and early v5
2104  * kernels. This problem manifests as a 1-slot size difference between the
2105  * on-disk flcount and the active [first, last] range of a wrapped agfl. This
2106  * may also catch variants of agfl count corruption unrelated to padding. Either
2107  * way, we'll reset the agfl and warn the user.
2108  *
2109  * Return true if a reset is required before the agfl can be used, false
2110  * otherwise.
2111  */
2112 static bool
2113 xfs_agfl_needs_reset(
2114 	struct xfs_mount	*mp,
2115 	struct xfs_agf		*agf)
2116 {
2117 	uint32_t		f = be32_to_cpu(agf->agf_flfirst);
2118 	uint32_t		l = be32_to_cpu(agf->agf_fllast);
2119 	uint32_t		c = be32_to_cpu(agf->agf_flcount);
2120 	int			agfl_size = xfs_agfl_size(mp);
2121 	int			active;
2122 
2123 	/* no agfl header on v4 supers */
2124 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2125 		return false;
2126 
2127 	/*
2128 	 * The agf read verifier catches severe corruption of these fields.
2129 	 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2130 	 * the verifier allows it.
2131 	 */
2132 	if (f >= agfl_size || l >= agfl_size)
2133 		return true;
2134 	if (c > agfl_size)
2135 		return true;
2136 
2137 	/*
2138 	 * Check consistency between the on-disk count and the active range. An
2139 	 * agfl padding mismatch manifests as an inconsistent flcount.
2140 	 */
2141 	if (c && l >= f)
2142 		active = l - f + 1;
2143 	else if (c)
2144 		active = agfl_size - f + l + 1;
2145 	else
2146 		active = 0;
2147 
2148 	return active != c;
2149 }
2150 
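/*
 * Worked example for the wrapped-range check above, with hypothetical
 * values: for agfl_size = 118, flfirst = 115, fllast = 2 and flcount = 6,
 * the active range wraps, so active = 118 - 115 + 2 + 1 = 6 == flcount
 * and no reset is needed.  A padding mismatch typically shows up as
 * flcount differing from the computed active range by one slot.
 */
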
2151 /*
2152  * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2153  * agfl content cannot be trusted. Warn the user that a repair is required to
2154  * recover leaked blocks.
2155  *
2156  * The purpose of this mechanism is to handle filesystems affected by the agfl
2157  * header padding mismatch problem. A reset keeps the filesystem online with a
2158  * relatively minor free space accounting inconsistency rather than suffer the
2159  * inevitable crash from use of an invalid agfl block.
2160  */
2161 static void
2162 xfs_agfl_reset(
2163 	struct xfs_trans	*tp,
2164 	struct xfs_buf		*agbp,
2165 	struct xfs_perag	*pag)
2166 {
2167 	struct xfs_mount	*mp = tp->t_mountp;
2168 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
2169 
2170 	ASSERT(pag->pagf_agflreset);
2171 	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2172 
2173 	xfs_warn(mp,
2174 	       "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2175 	       "Please unmount and run xfs_repair.",
2176 	         pag->pag_agno, pag->pagf_flcount);
2177 
2178 	agf->agf_flfirst = 0;
2179 	agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2180 	agf->agf_flcount = 0;
2181 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2182 				    XFS_AGF_FLCOUNT);
2183 
2184 	pag->pagf_flcount = 0;
2185 	pag->pagf_agflreset = false;
2186 }
2187 
2188 /*
2189  * Defer an AGFL block free. This is effectively equivalent to
2190  * xfs_bmap_add_free() with some special handling particular to AGFL blocks.
2191  *
2192  * Deferring AGFL frees helps prevent log reservation overruns due to too many
2193  * allocation operations in a transaction. AGFL frees are prone to this problem
2194  * because, for one, they are always freed one at a time. Further, an immediate
2195  * AGFL block free can cause a btree join and require another block free before
2196  * the real allocation can proceed. Deferring the free disconnects freeing up
2197  * the AGFL slot from freeing the block.
2198  */
2199 STATIC void
2200 xfs_defer_agfl_block(
2201 	struct xfs_trans		*tp,
2202 	xfs_agnumber_t			agno,
2203 	xfs_fsblock_t			agbno,
2204 	struct xfs_owner_info		*oinfo)
2205 {
2206 	struct xfs_mount		*mp = tp->t_mountp;
2207 	struct xfs_extent_free_item	*new;		/* new element */
2208 
2209 	ASSERT(xfs_bmap_free_item_zone != NULL);
2210 	ASSERT(oinfo != NULL);
2211 
2212 	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
2213 	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
2214 	new->xefi_blockcount = 1;
2215 	new->xefi_oinfo = *oinfo;
2216 
2217 	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2218 
2219 	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
2220 }
2221 
2222 /*
2223  * Decide whether to use this allocation group for this allocation.
2224  * If so, fix up the btree freelist's size.
2225  */
2226 int			/* error */
2227 xfs_alloc_fix_freelist(
2228 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
2229 	int			flags)	/* XFS_ALLOC_FLAG_... */
2230 {
2231 	struct xfs_mount	*mp = args->mp;
2232 	struct xfs_perag	*pag = args->pag;
2233 	struct xfs_trans	*tp = args->tp;
2234 	struct xfs_buf		*agbp = NULL;
2235 	struct xfs_buf		*agflbp = NULL;
2236 	struct xfs_alloc_arg	targs;	/* local allocation arguments */
2237 	xfs_agblock_t		bno;	/* freelist block */
2238 	xfs_extlen_t		need;	/* total blocks needed in freelist */
2239 	int			error = 0;
2240 
2241 	if (!pag->pagf_init) {
2242 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2243 		if (error)
2244 			goto out_no_agbp;
2245 		if (!pag->pagf_init) {
2246 			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2247 			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2248 			goto out_agbp_relse;
2249 		}
2250 	}
2251 
2252 	/*
2253 	 * If this is a metadata-preferred pag and we are allocating user
2254 	 * data, try somewhere else, unless we are being asked to try harder
2255 	 * at this point.
2256 	 */
2257 	if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
2258 	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2259 		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2260 		goto out_agbp_relse;
2261 	}
2262 
2263 	need = xfs_alloc_min_freelist(mp, pag);
2264 	if (!xfs_alloc_space_available(args, need, flags |
2265 			XFS_ALLOC_FLAG_CHECK))
2266 		goto out_agbp_relse;
2267 
2268 	/*
2269 	 * Get the a.g. freespace buffer.
2270 	 * Can fail if we're not blocking on locks, and it's held.
2271 	 */
2272 	if (!agbp) {
2273 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2274 		if (error)
2275 			goto out_no_agbp;
2276 		if (!agbp) {
2277 			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2278 			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2279 			goto out_no_agbp;
2280 		}
2281 	}
2282 
2283 	/* reset a padding mismatched agfl before final free space check */
2284 	if (pag->pagf_agflreset)
2285 		xfs_agfl_reset(tp, agbp, pag);
2286 
2287 	/* Reject if there isn't enough total space or a long enough single extent. */
2288 	need = xfs_alloc_min_freelist(mp, pag);
2289 	if (!xfs_alloc_space_available(args, need, flags))
2290 		goto out_agbp_relse;
2291 
2292 	/*
2293 	 * Make the freelist shorter if it's too long.
2294 	 *
2295 	 * Note that from this point onwards, we will always release the agf and
2296 	 * agfl buffers on error. This handles the case where we error out and
2297 	 * the buffers are clean or may not have been joined to the transaction
2298 	 * and hence need to be released manually. If they have been joined to
2299 	 * the transaction, then xfs_trans_brelse() will handle them
2300 	 * appropriately based on the recursion count and dirty state of the
2301 	 * buffer.
2302 	 *
2303 	 * XXX (dgc): When we have lots of free space, does this buy us
2304 	 * anything other than extra overhead when we need to put more blocks
2305 	 * back on the free list? Maybe we should only do this when space is
2306 	 * getting low or the AGFL is more than half full?
2307 	 *
2308 	 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2309 	 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2310 	 * updating the rmapbt.  Both flags are used in xfs_repair while we're
2311 	 * rebuilding the rmapbt, and neither are used by the kernel.  They're
2312 	 * both required to ensure that rmaps are correctly recorded for the
2313 	 * regenerated AGFL, bnobt, and cntbt.  See repair/phase5.c and
2314 	 * repair/rmap.c in xfsprogs for details.
2315 	 */
2316 	memset(&targs, 0, sizeof(targs));
2317 	if (flags & XFS_ALLOC_FLAG_NORMAP)
2318 		xfs_rmap_skip_owner_update(&targs.oinfo);
2319 	else
2320 		xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
2321 	while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
2322 		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2323 		if (error)
2324 			goto out_agbp_relse;
2325 
2326 		/* defer agfl frees */
2327 		xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2328 	}
2329 
2330 	targs.tp = tp;
2331 	targs.mp = mp;
2332 	targs.agbp = agbp;
2333 	targs.agno = args->agno;
2334 	targs.alignment = targs.minlen = targs.prod = 1;
2335 	targs.type = XFS_ALLOCTYPE_THIS_AG;
2336 	targs.pag = pag;
2337 	error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2338 	if (error)
2339 		goto out_agbp_relse;
2340 
2341 	/* Make the freelist longer if it's too short. */
2342 	while (pag->pagf_flcount < need) {
2343 		targs.agbno = 0;
2344 		targs.maxlen = need - pag->pagf_flcount;
2345 		targs.resv = XFS_AG_RESV_AGFL;
2346 
2347 		/* Allocate as many blocks as possible at once. */
2348 		error = xfs_alloc_ag_vextent(&targs);
2349 		if (error)
2350 			goto out_agflbp_relse;
2351 
2352 		/*
2353 		 * Stop if we run out.  Won't happen if callers are obeying
2354 		 * the restrictions correctly.  Can happen for free calls
2355 		 * on a completely full ag.
2356 		 */
2357 		if (targs.agbno == NULLAGBLOCK) {
2358 			if (flags & XFS_ALLOC_FLAG_FREEING)
2359 				break;
2360 			goto out_agflbp_relse;
2361 		}
2362 		/*
2363 		 * Put each allocated block on the list.
2364 		 */
2365 		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2366 			error = xfs_alloc_put_freelist(tp, agbp,
2367 							agflbp, bno, 0);
2368 			if (error)
2369 				goto out_agflbp_relse;
2370 		}
2371 	}
2372 	xfs_trans_brelse(tp, agflbp);
2373 	args->agbp = agbp;
2374 	return 0;
2375 
2376 out_agflbp_relse:
2377 	xfs_trans_brelse(tp, agflbp);
2378 out_agbp_relse:
2379 	if (agbp)
2380 		xfs_trans_brelse(tp, agbp);
2381 out_no_agbp:
2382 	args->agbp = NULL;
2383 	return error;
2384 }
2385 
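/*
 * Illustrative sketch, compiled out: a freeing-path caller passes
 * XFS_ALLOC_FLAG_FREEING so the space checks are bypassed, then must
 * still check args->agbp, which is left NULL when the AG was skipped.
 */
#if 0
	error = xfs_alloc_fix_freelist(args, XFS_ALLOC_FLAG_FREEING);
	if (error)
		return error;
	if (!args->agbp)
		return 0;	/* AG not usable right now */
#endif
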
2386 /*
2387  * Get a block from the freelist.
2388  * Returns the block address in *bnop; NULLAGBLOCK means the list was empty.
2389  */
2390 int				/* error */
2391 xfs_alloc_get_freelist(
2392 	xfs_trans_t	*tp,	/* transaction pointer */
2393 	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
2394 	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
2395 	int		btreeblk) /* destination is an AGF btree */
2396 {
2397 	xfs_agf_t	*agf;	/* a.g. freespace structure */
2398 	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
2399 	xfs_agblock_t	bno;	/* block number returned */
2400 	__be32		*agfl_bno;
2401 	int		error;
2402 	int		logflags;
2403 	xfs_mount_t	*mp = tp->t_mountp;
2404 	xfs_perag_t	*pag;	/* per allocation group data */
2405 
2406 	/*
2407 	 * Freelist is empty, give up.
2408 	 */
2409 	agf = XFS_BUF_TO_AGF(agbp);
2410 	if (!agf->agf_flcount) {
2411 		*bnop = NULLAGBLOCK;
2412 		return 0;
2413 	}
2414 	/*
2415 	 * Read the array of free blocks.
2416 	 */
2417 	error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2418 				    &agflbp);
2419 	if (error)
2420 		return error;
2421 
2423 	/*
2424 	 * Get the block number and update the data structures.
2425 	 */
2426 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2427 	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2428 	be32_add_cpu(&agf->agf_flfirst, 1);
2429 	xfs_trans_brelse(tp, agflbp);
2430 	if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2431 		agf->agf_flfirst = 0;
2432 
2433 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2434 	ASSERT(!pag->pagf_agflreset);
2435 	be32_add_cpu(&agf->agf_flcount, -1);
2436 	xfs_trans_agflist_delta(tp, -1);
2437 	pag->pagf_flcount--;
2438 
2439 	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2440 	if (btreeblk) {
2441 		be32_add_cpu(&agf->agf_btreeblks, 1);
2442 		pag->pagf_btreeblks++;
2443 		logflags |= XFS_AGF_BTREEBLKS;
2444 	}
2445 	xfs_perag_put(pag);	/* done with pag after the btreeblks update */
2446 
2447 	xfs_alloc_log_agf(tp, agbp, logflags);
2448 	*bnop = bno;
2449 
2450 	return 0;
2451 }
2452 
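/*
 * Illustrative sketch, compiled out: the AGFL is a ring of block numbers
 * indexed by agf_flfirst/agf_fllast, so popping a block advances flfirst
 * and wraps it at xfs_agfl_size(mp).  A NULLAGBLOCK result means the list
 * was empty rather than an error.
 */
#if 0
	xfs_agblock_t	bno;

	error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
	if (error)
		return error;
	if (bno == NULLAGBLOCK)
		return 0;	/* freelist empty */
#endif
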
2453 /*
2454  * Log the given fields from the agf structure.
2455  */
2456 void
2457 xfs_alloc_log_agf(
2458 	xfs_trans_t	*tp,	/* transaction pointer */
2459 	xfs_buf_t	*bp,	/* buffer for a.g. freelist header */
2460 	int		fields)	/* mask of fields to be logged (XFS_AGF_...) */
2461 {
2462 	int	first;		/* first byte offset */
2463 	int	last;		/* last byte offset */
2464 	static const short	offsets[] = {
2465 		offsetof(xfs_agf_t, agf_magicnum),
2466 		offsetof(xfs_agf_t, agf_versionnum),
2467 		offsetof(xfs_agf_t, agf_seqno),
2468 		offsetof(xfs_agf_t, agf_length),
2469 		offsetof(xfs_agf_t, agf_roots[0]),
2470 		offsetof(xfs_agf_t, agf_levels[0]),
2471 		offsetof(xfs_agf_t, agf_flfirst),
2472 		offsetof(xfs_agf_t, agf_fllast),
2473 		offsetof(xfs_agf_t, agf_flcount),
2474 		offsetof(xfs_agf_t, agf_freeblks),
2475 		offsetof(xfs_agf_t, agf_longest),
2476 		offsetof(xfs_agf_t, agf_btreeblks),
2477 		offsetof(xfs_agf_t, agf_uuid),
2478 		offsetof(xfs_agf_t, agf_rmap_blocks),
2479 		offsetof(xfs_agf_t, agf_refcount_blocks),
2480 		offsetof(xfs_agf_t, agf_refcount_root),
2481 		offsetof(xfs_agf_t, agf_refcount_level),
2482 		/* needed so that we don't log the whole rest of the structure: */
2483 		offsetof(xfs_agf_t, agf_spare64),
2484 		sizeof(xfs_agf_t)
2485 	};
2486 
2487 	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2488 
2489 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
2490 
2491 	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2492 	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2493 }
2494 
2495 /*
2496  * Interface for inode allocation to force the pag data to be initialized.
2497  */
2498 int					/* error */
2499 xfs_alloc_pagf_init(
2500 	xfs_mount_t		*mp,	/* file system mount structure */
2501 	xfs_trans_t		*tp,	/* transaction pointer */
2502 	xfs_agnumber_t		agno,	/* allocation group number */
2503 	int			flags)	/* XFS_ALLOC_FLAGS_... */
2504 {
2505 	xfs_buf_t		*bp;
2506 	int			error;
2507 
2508 	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2509 		return error;
2510 	if (bp)
2511 		xfs_trans_brelse(tp, bp);
2512 	return 0;
2513 }
2514 
2515 /*
2516  * Put the block on the freelist for the allocation group.
2517  */
2518 int					/* error */
2519 xfs_alloc_put_freelist(
2520 	xfs_trans_t		*tp,	/* transaction pointer */
2521 	xfs_buf_t		*agbp,	/* buffer for a.g. freelist header */
2522 	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
2523 	xfs_agblock_t		bno,	/* block being freed */
2524 	int			btreeblk) /* block came from an AGF btree */
2525 {
2526 	xfs_agf_t		*agf;	/* a.g. freespace structure */
2527 	__be32			*blockp;/* pointer to array entry */
2528 	int			error;
2529 	int			logflags;
2530 	xfs_mount_t		*mp;	/* mount structure */
2531 	xfs_perag_t		*pag;	/* per allocation group data */
2532 	__be32			*agfl_bno;
2533 	int			startoff;
2534 
2535 	agf = XFS_BUF_TO_AGF(agbp);
2536 	mp = tp->t_mountp;
2537 
2538 	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2539 			be32_to_cpu(agf->agf_seqno), &agflbp)))
2540 		return error;
2541 	be32_add_cpu(&agf->agf_fllast, 1);
2542 	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
2543 		agf->agf_fllast = 0;
2544 
2545 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2546 	ASSERT(!pag->pagf_agflreset);
2547 	be32_add_cpu(&agf->agf_flcount, 1);
2548 	xfs_trans_agflist_delta(tp, 1);
2549 	pag->pagf_flcount++;
2550 
2551 	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2552 	if (btreeblk) {
2553 		be32_add_cpu(&agf->agf_btreeblks, -1);
2554 		pag->pagf_btreeblks--;
2555 		logflags |= XFS_AGF_BTREEBLKS;
2556 	}
2557 	xfs_perag_put(pag);
2558 
2559 	xfs_alloc_log_agf(tp, agbp, logflags);
2560 
2561 	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
2562 
2563 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2564 	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
2565 	*blockp = cpu_to_be32(bno);
2566 	startoff = (char *)blockp - (char *)agflbp->b_addr;
2567 
2568 	xfs_alloc_log_agf(tp, agbp, logflags);
2569 
2570 	xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2571 	xfs_trans_log_buf(tp, agflbp, startoff,
2572 			  startoff + sizeof(xfs_agblock_t) - 1);
2573 	return 0;
2574 }
2575 
2576 static xfs_failaddr_t
2577 xfs_agf_verify(
2578 	struct xfs_buf		*bp)
2579 {
2580 	struct xfs_mount	*mp = bp->b_target->bt_mount;
2581 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);
2582 
2583 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2584 		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
2585 			return __this_address;
2586 		if (!xfs_log_check_lsn(mp,
2587 				be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
2588 			return __this_address;
2589 	}
2590 
2591 	if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2592 	      XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2593 	      be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2594 	      be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
2595 	      be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
2596 	      be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
2597 		return __this_address;
2598 
2599 	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
2600 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
2601 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
2602 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
2603 		return __this_address;
2604 
2605 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2606 	    (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
2607 	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
2608 		return __this_address;
2609 
2610 	/*
2611 	 * During growfs operations, the perag is not fully initialised,
2612 	 * so we can't use it for any useful checking. growfs ensures we
2613 	 * can't use it by using uncached buffers that don't have the perag
2614 	 * attached, so we can detect and avoid this problem.
2615 	 */
2616 	if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2617 		return __this_address;
2618 
2619 	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2620 	    be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2621 		return __this_address;
2622 
2623 	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
2624 	    (be32_to_cpu(agf->agf_refcount_level) < 1 ||
2625 	     be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
2626 		return __this_address;
2627 
2628 	return NULL;
2630 }
2631 
2632 static void
2633 xfs_agf_read_verify(
2634 	struct xfs_buf	*bp)
2635 {
2636 	struct xfs_mount *mp = bp->b_target->bt_mount;
2637 	xfs_failaddr_t	fa;
2638 
2639 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
2640 	    !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
2641 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2642 	else {
2643 		fa = xfs_agf_verify(bp);
2644 		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
2645 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2646 	}
2647 }
2648 
2649 static void
2650 xfs_agf_write_verify(
2651 	struct xfs_buf	*bp)
2652 {
2653 	struct xfs_mount	*mp = bp->b_target->bt_mount;
2654 	struct xfs_buf_log_item	*bip = bp->b_log_item;
2655 	xfs_failaddr_t		fa;
2656 
2657 	fa = xfs_agf_verify(bp);
2658 	if (fa) {
2659 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2660 		return;
2661 	}
2662 
2663 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2664 		return;
2665 
2666 	if (bip)
2667 		XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2668 
2669 	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
2670 }
2671 
2672 const struct xfs_buf_ops xfs_agf_buf_ops = {
2673 	.name = "xfs_agf",
2674 	.verify_read = xfs_agf_read_verify,
2675 	.verify_write = xfs_agf_write_verify,
2676 	.verify_struct = xfs_agf_verify,
2677 };
2678 
2679 /*
2680  * Read in the allocation group header (free/alloc section).
2681  */
2682 int					/* error */
2683 xfs_read_agf(
2684 	struct xfs_mount	*mp,	/* mount point structure */
2685 	struct xfs_trans	*tp,	/* transaction pointer */
2686 	xfs_agnumber_t		agno,	/* allocation group number */
2687 	int			flags,	/* XFS_BUF_ */
2688 	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
2689 {
2690 	int		error;
2691 
2692 	trace_xfs_read_agf(mp, agno);
2693 
2694 	ASSERT(agno != NULLAGNUMBER);
2695 	error = xfs_trans_read_buf(
2696 			mp, tp, mp->m_ddev_targp,
2697 			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2698 			XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
2699 	if (error)
2700 		return error;
2701 	if (!*bpp)
2702 		return 0;
2703 
2704 	ASSERT(!(*bpp)->b_error);
2705 	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
2706 	return 0;
2707 }
2708 
2709 /*
2710  * Read in the allocation group header (free/alloc section).
2711  */
2712 int					/* error */
2713 xfs_alloc_read_agf(
2714 	struct xfs_mount	*mp,	/* mount point structure */
2715 	struct xfs_trans	*tp,	/* transaction pointer */
2716 	xfs_agnumber_t		agno,	/* allocation group number */
2717 	int			flags,	/* XFS_ALLOC_FLAG_... */
2718 	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
2719 {
2720 	struct xfs_agf		*agf;		/* ag freelist header */
2721 	struct xfs_perag	*pag;		/* per allocation group data */
2722 	int			error;
2723 
2724 	trace_xfs_alloc_read_agf(mp, agno);
2725 
2726 	ASSERT(agno != NULLAGNUMBER);
2727 	error = xfs_read_agf(mp, tp, agno,
2728 			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2729 			bpp);
2730 	if (error)
2731 		return error;
2732 	if (!*bpp)
2733 		return 0;
2734 	ASSERT(!(*bpp)->b_error);
2735 
2736 	agf = XFS_BUF_TO_AGF(*bpp);
2737 	pag = xfs_perag_get(mp, agno);
2738 	if (!pag->pagf_init) {
2739 		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2740 		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2741 		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2742 		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2743 		pag->pagf_levels[XFS_BTNUM_BNOi] =
2744 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2745 		pag->pagf_levels[XFS_BTNUM_CNTi] =
2746 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2747 		pag->pagf_levels[XFS_BTNUM_RMAPi] =
2748 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
2749 		pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
2750 		pag->pagf_init = 1;
2751 		pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
2752 	}
2753 #ifdef DEBUG
2754 	else if (!XFS_FORCED_SHUTDOWN(mp)) {
2755 		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
2756 		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2757 		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2758 		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2759 		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2760 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2761 		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2762 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2763 	}
2764 #endif
2765 	xfs_perag_put(pag);
2766 	return 0;
2767 }
2768 
2769 /*
2770  * Allocate an extent (variable-size).
2771  * Depending on the allocation type, we either look in a single allocation
2772  * group or loop over the allocation groups to find the result.
2773  */
2774 int				/* error */
2775 xfs_alloc_vextent(
2776 	struct xfs_alloc_arg	*args)	/* allocation argument structure */
2777 {
2778 	xfs_agblock_t		agsize;	/* allocation group size */
2779 	int			error;
2780 	int			flags;	/* XFS_ALLOC_FLAG_... locking flags */
2781 	struct xfs_mount	*mp;	/* mount structure pointer */
2782 	xfs_agnumber_t		sagno;	/* starting allocation group number */
2783 	xfs_alloctype_t		type;	/* input allocation type */
2784 	int			bump_rotor = 0;
2785 	xfs_agnumber_t		rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2786 
2787 	mp = args->mp;
2788 	type = args->otype = args->type;
2789 	args->agbno = NULLAGBLOCK;
2790 	/*
2791 	 * Just fix this up, for the case where the last a.g. is shorter
2792 	 * (or there's only one a.g.) and the caller couldn't easily figure
2793 	 * that out (xfs_bmap_alloc).
2794 	 */
2795 	agsize = mp->m_sb.sb_agblocks;
2796 	if (args->maxlen > agsize)
2797 		args->maxlen = agsize;
2798 	if (args->alignment == 0)
2799 		args->alignment = 1;
2800 	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2801 	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2802 	ASSERT(args->minlen <= args->maxlen);
2803 	ASSERT(args->minlen <= agsize);
2804 	ASSERT(args->mod < args->prod);
2805 	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2806 	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2807 	    args->minlen > args->maxlen || args->minlen > agsize ||
2808 	    args->mod >= args->prod) {
2809 		args->fsbno = NULLFSBLOCK;
2810 		trace_xfs_alloc_vextent_badargs(args);
2811 		return 0;
2812 	}
2813 
2814 	switch (type) {
2815 	case XFS_ALLOCTYPE_THIS_AG:
2816 	case XFS_ALLOCTYPE_NEAR_BNO:
2817 	case XFS_ALLOCTYPE_THIS_BNO:
2818 		/*
2819 		 * These three force us into a single a.g.
2820 		 */
2821 		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2822 		args->pag = xfs_perag_get(mp, args->agno);
2823 		error = xfs_alloc_fix_freelist(args, 0);
2824 		if (error) {
2825 			trace_xfs_alloc_vextent_nofix(args);
2826 			goto error0;
2827 		}
2828 		if (!args->agbp) {
2829 			trace_xfs_alloc_vextent_noagbp(args);
2830 			break;
2831 		}
2832 		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2833 		if ((error = xfs_alloc_ag_vextent(args)))
2834 			goto error0;
2835 		break;
2836 	case XFS_ALLOCTYPE_START_BNO:
2837 		/*
2838 		 * Try near allocation first, then anywhere-in-ag after
2839 		 * the first a.g. fails.
2840 		 */
2841 		if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
2842 		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2843 			args->fsbno = XFS_AGB_TO_FSB(mp,
2844 					((mp->m_agfrotor / rotorstep) %
2845 					mp->m_sb.sb_agcount), 0);
2846 			bump_rotor = 1;
2847 		}
2848 		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2849 		args->type = XFS_ALLOCTYPE_NEAR_BNO;
2850 		/* FALLTHROUGH */
2851 	case XFS_ALLOCTYPE_FIRST_AG:
2852 		/*
2853 		 * Rotate through the allocation groups looking for a winner.
2854 		 */
2855 		if (type == XFS_ALLOCTYPE_FIRST_AG) {
2856 			/*
2857 			 * Start with allocation group given by bno.
2858 			 */
2859 			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2860 			args->type = XFS_ALLOCTYPE_THIS_AG;
2861 			sagno = 0;
2862 			flags = 0;
2863 		} else {
2864 			/*
2865 			 * Start with the given allocation group.
2866 			 */
2867 			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2868 			flags = XFS_ALLOC_FLAG_TRYLOCK;
2869 		}
2870 		/*
2871 		 * Loop over allocation groups twice; first time with
2872 		 * trylock set, second time without.
2873 		 */
2874 		for (;;) {
2875 			args->pag = xfs_perag_get(mp, args->agno);
2876 			error = xfs_alloc_fix_freelist(args, flags);
2877 			if (error) {
2878 				trace_xfs_alloc_vextent_nofix(args);
2879 				goto error0;
2880 			}
2881 			/*
2882 			 * If we get a buffer back then the allocation will fly.
2883 			 */
2884 			if (args->agbp) {
2885 				if ((error = xfs_alloc_ag_vextent(args)))
2886 					goto error0;
2887 				break;
2888 			}
2889 
2890 			trace_xfs_alloc_vextent_loopfailed(args);
2891 
2892 			/*
2893 			 * Didn't work, figure out the next iteration.
2894 			 */
2895 			if (args->agno == sagno &&
2896 			    type == XFS_ALLOCTYPE_START_BNO)
2897 				args->type = XFS_ALLOCTYPE_THIS_AG;
2898 			/*
2899 			 * For the first allocation, we can try any AG to get
2900 			 * space.  However, if we already have allocated a
2901 			 * block, we don't want to try AGs whose number is below
2902 			 * sagno. Otherwise, we may end up with out-of-order
2903 			 * locking of AGF, which might cause deadlock.
2904 			 */
2905 			if (++(args->agno) == mp->m_sb.sb_agcount) {
2906 				if (args->tp->t_firstblock != NULLFSBLOCK)
2907 					args->agno = sagno;
2908 				else
2909 					args->agno = 0;
2910 			}
2911 			/*
2912 			 * Reached the starting a.g., must either be done
2913 			 * or switch to non-trylock mode.
2914 			 */
2915 			if (args->agno == sagno) {
2916 				if (flags == 0) {
2917 					args->agbno = NULLAGBLOCK;
2918 					trace_xfs_alloc_vextent_allfailed(args);
2919 					break;
2920 				}
2921 
2922 				flags = 0;
2923 				if (type == XFS_ALLOCTYPE_START_BNO) {
2924 					args->agbno = XFS_FSB_TO_AGBNO(mp,
2925 						args->fsbno);
2926 					args->type = XFS_ALLOCTYPE_NEAR_BNO;
2927 				}
2928 			}
2929 			xfs_perag_put(args->pag);
2930 		}
2931 		if (bump_rotor) {
2932 			if (args->agno == sagno)
2933 				mp->m_agfrotor = (mp->m_agfrotor + 1) %
2934 					(mp->m_sb.sb_agcount * rotorstep);
2935 			else
2936 				mp->m_agfrotor = (args->agno * rotorstep + 1) %
2937 					(mp->m_sb.sb_agcount * rotorstep);
2938 		}
2939 		break;
2940 	default:
2941 		ASSERT(0);
2942 		/* NOTREACHED */
2943 	}
2944 	if (args->agbno == NULLAGBLOCK)
2945 		args->fsbno = NULLFSBLOCK;
2946 	else {
2947 		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2948 #ifdef DEBUG
2949 		ASSERT(args->len >= args->minlen);
2950 		ASSERT(args->len <= args->maxlen);
2951 		ASSERT(args->agbno % args->alignment == 0);
2952 		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2953 			args->len);
2954 #endif
2955 
2956 		/* Zero the extent if we were asked to do so */
2957 		if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
2958 			error = xfs_zero_extent(args->ip, args->fsbno, args->len);
2959 			if (error)
2960 				goto error0;
2961 		}
2962 
2963 	}
2964 	xfs_perag_put(args->pag);
2965 	return 0;
2966 error0:
2967 	xfs_perag_put(args->pag);
2968 	return error;
2969 }
2970 
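/*
 * Illustrative sketch, compiled out: a minimal xfs_alloc_vextent() caller.
 * The field choices below are hypothetical; a clean return with
 * args.fsbno == NULLFSBLOCK means no space was found, not an error.
 */
#if 0
	struct xfs_alloc_arg	args = {
		.tp		= tp,
		.mp		= mp,
		.fsbno		= target_fsbno,	/* hypothetical hint */
		.type		= XFS_ALLOCTYPE_START_BNO,
		.minlen		= 1,
		.maxlen		= 16,
		.prod		= 1,
		.alignment	= 1,
		.resv		= XFS_AG_RESV_NONE,
	};

	/* rmap owner setup (args.oinfo) is omitted in this sketch */
	error = xfs_alloc_vextent(&args);
	if (error)
		return error;
	if (args.fsbno == NULLFSBLOCK)
		return -ENOSPC;	/* hypothetical caller policy */
#endif
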
2971 /* Ensure that the freelist is at full capacity. */
2972 int
2973 xfs_free_extent_fix_freelist(
2974 	struct xfs_trans	*tp,
2975 	xfs_agnumber_t		agno,
2976 	struct xfs_buf		**agbp)
2977 {
2978 	struct xfs_alloc_arg	args;
2979 	int			error;
2980 
2981 	memset(&args, 0, sizeof(struct xfs_alloc_arg));
2982 	args.tp = tp;
2983 	args.mp = tp->t_mountp;
2984 	args.agno = agno;
2985 
2986 	/*
2987 	 * Validate that the AG number is legal - this enables us to detect
2988 	 * and handle a silent filesystem corruption rather than crashing.
2989 	 */
2990 	if (args.agno >= args.mp->m_sb.sb_agcount)
2991 		return -EFSCORRUPTED;
2992 
2993 	args.pag = xfs_perag_get(args.mp, args.agno);
2994 	ASSERT(args.pag);
2995 
2996 	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2997 	if (error)
2998 		goto out;
2999 
3000 	*agbp = args.agbp;
3001 out:
3002 	xfs_perag_put(args.pag);
3003 	return error;
3004 }
3005 
3006 /*
3007  * Free an extent.
3008  * Just break up the extent address and hand off to xfs_free_ag_extent
3009  * after fixing up the freelist.
3010  */
3011 int				/* error */
3012 __xfs_free_extent(
3013 	struct xfs_trans	*tp,	/* transaction pointer */
3014 	xfs_fsblock_t		bno,	/* starting block number of extent */
3015 	xfs_extlen_t		len,	/* length of extent */
3016 	struct xfs_owner_info	*oinfo,	/* extent owner */
3017 	enum xfs_ag_resv_type	type,	/* block reservation type */
3018 	bool			skip_discard)
3019 {
3020 	struct xfs_mount	*mp = tp->t_mountp;
3021 	struct xfs_buf		*agbp;
3022 	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(mp, bno);
3023 	xfs_agblock_t		agbno = XFS_FSB_TO_AGBNO(mp, bno);
3024 	int			error;
3025 	unsigned int		busy_flags = 0;
3026 
3027 	ASSERT(len != 0);
3028 	ASSERT(type != XFS_AG_RESV_AGFL);
3029 
3030 	if (XFS_TEST_ERROR(false, mp,
3031 			XFS_ERRTAG_FREE_EXTENT))
3032 		return -EIO;
3033 
3034 	error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
3035 	if (error)
3036 		return error;
3037 
3038 	XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
3039 
3040 	/* validate that the extent size is legal now that we have the agf locked */
3041 	XFS_WANT_CORRUPTED_GOTO(mp,
3042 		agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
3043 				err);
3044 
3045 	error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
3046 	if (error)
3047 		goto err;
3048 
3049 	if (skip_discard)
3050 		busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3051 	xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags);
3052 	return 0;
3053 
3054 err:
3055 	xfs_trans_brelse(tp, agbp);
3056 	return error;
3057 }
3058 
3059 struct xfs_alloc_query_range_info {
3060 	xfs_alloc_query_range_fn	fn;
3061 	void				*priv;
3062 };
3063 
3064 /* Format btree record and pass to our callback. */
3065 STATIC int
3066 xfs_alloc_query_range_helper(
3067 	struct xfs_btree_cur		*cur,
3068 	union xfs_btree_rec		*rec,
3069 	void				*priv)
3070 {
3071 	struct xfs_alloc_query_range_info	*query = priv;
3072 	struct xfs_alloc_rec_incore		irec;
3073 
3074 	irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
3075 	irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
3076 	return query->fn(cur, &irec, query->priv);
3077 }
3078 
3079 /* Find all free space within a given range of blocks. */
3080 int
3081 xfs_alloc_query_range(
3082 	struct xfs_btree_cur			*cur,
3083 	struct xfs_alloc_rec_incore		*low_rec,
3084 	struct xfs_alloc_rec_incore		*high_rec,
3085 	xfs_alloc_query_range_fn		fn,
3086 	void					*priv)
3087 {
3088 	union xfs_btree_irec			low_brec;
3089 	union xfs_btree_irec			high_brec;
3090 	struct xfs_alloc_query_range_info	query;
3091 
3092 	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3093 	low_brec.a = *low_rec;
3094 	high_brec.a = *high_rec;
3095 	query.priv = priv;
3096 	query.fn = fn;
3097 	return xfs_btree_query_range(cur, &low_brec, &high_brec,
3098 			xfs_alloc_query_range_helper, &query);
3099 }
3100 
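/*
 * Illustrative sketch, compiled out: a hypothetical callback for
 * xfs_alloc_query_range() that counts the free extents in the queried
 * range; the signature mirrors xfs_alloc_query_range_helper() above.
 */
#if 0
STATIC int
xfs_alloc_count_free_fn(	/* hypothetical */
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	unsigned long			*nr = priv;

	(*nr)++;
	return 0;
}
#endif
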
3101 /* Find all free space records. */
3102 int
3103 xfs_alloc_query_all(
3104 	struct xfs_btree_cur			*cur,
3105 	xfs_alloc_query_range_fn		fn,
3106 	void					*priv)
3107 {
3108 	struct xfs_alloc_query_range_info	query;
3109 
3110 	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3111 	query.priv = priv;
3112 	query.fn = fn;
3113 	return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3114 }
3115 
3116 /* Is there a record covering a given extent? */
3117 int
3118 xfs_alloc_has_record(
3119 	struct xfs_btree_cur	*cur,
3120 	xfs_agblock_t		bno,
3121 	xfs_extlen_t		len,
3122 	bool			*exists)
3123 {
3124 	union xfs_btree_irec	low;
3125 	union xfs_btree_irec	high;
3126 
3127 	memset(&low, 0, sizeof(low));
3128 	low.a.ar_startblock = bno;
3129 	memset(&high, 0xFF, sizeof(high));
3130 	high.a.ar_startblock = bno + len - 1;
3131 
3132 	return xfs_btree_has_record(cur, &low, &high, exists);
3133 }
3134 
3135 /*
3136  * Walk all the blocks in the AGFL.  The @walk_fn can return any negative
3137  * error code or XFS_BTREE_QUERY_RANGE_ABORT.
3138  */
3139 int
3140 xfs_agfl_walk(
3141 	struct xfs_mount	*mp,
3142 	struct xfs_agf		*agf,
3143 	struct xfs_buf		*agflbp,
3144 	xfs_agfl_walk_fn	walk_fn,
3145 	void			*priv)
3146 {
3147 	__be32			*agfl_bno;
3148 	unsigned int		i;
3149 	int			error;
3150 
3151 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
3152 	i = be32_to_cpu(agf->agf_flfirst);
3153 
3154 	/* Nothing to walk in an empty AGFL. */
3155 	if (agf->agf_flcount == cpu_to_be32(0))
3156 		return 0;
3157 
3158 	/* Otherwise, walk from first to last, wrapping as needed. */
3159 	for (;;) {
3160 		error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
3161 		if (error)
3162 			return error;
3163 		if (i == be32_to_cpu(agf->agf_fllast))
3164 			break;
3165 		if (++i == xfs_agfl_size(mp))
3166 			i = 0;
3167 	}
3168 
3169 	return 0;
3170 }
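
/*
 * Illustrative sketch, compiled out: a hypothetical xfs_agfl_walk()
 * callback that range-checks each AGFL block number; returning non-zero
 * stops the walk.
 */
#if 0
STATIC int
xfs_agfl_check_block_fn(	/* hypothetical */
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	if (agbno >= mp->m_sb.sb_agblocks)
		return -EFSCORRUPTED;
	return 0;
}
#endif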
3171