// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_attr_remote.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_quota.h"
#include "xfs_dir2.h"

/*
 * Look at all the extents for this logical region,
 * invalidate any buffers that are incore/in transactions.
 */
STATIC int
xfs_attr3_leaf_freextent(
	struct xfs_trans	**trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		blkno,
	int			blkcnt)
{
	struct xfs_bmbt_irec	map;
	struct xfs_buf		*bp;
	xfs_dablk_t		tblkno;
	xfs_daddr_t		dblkno;
	int			tblkcnt;
	int			dblkcnt;
	int			nmap;
	int			error;

	/*
	 * Roll through the "value", invalidating the attribute value's
	 * blocks.
	 */
	tblkno = blkno;
	tblkcnt = blkcnt;
	while (tblkcnt > 0) {
		/*
		 * Look up where the next extent of the value was placed
		 * on disk.
		 */
		nmap = 1;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt,
				       &map, &nmap, XFS_BMAPI_ATTRFORK);
		if (error) {
			return error;
		}
		ASSERT(nmap == 1);
		ASSERT(map.br_startblock != DELAYSTARTBLOCK);

		/*
		 * If it's a hole, these are already unmapped
		 * so there's nothing to invalidate.
		 */
		if (map.br_startblock != HOLESTARTBLOCK) {

			dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
						  map.br_startblock);
			dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
						map.br_blockcount);
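
			/*
			 * Grab the buffer covering this extent without
			 * reading it from disk and mark it stale so that
			 * any incore copy is dropped and never written back.
			 */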
			bp = xfs_trans_get_buf(*trans,
					dp->i_mount->m_ddev_targp,
					dblkno, dblkcnt, 0);
			if (!bp)
				return -ENOMEM;
			xfs_trans_binval(*trans, bp);
			/*
			 * Roll to next transaction.
			 */
			error = xfs_trans_roll_inode(trans, dp);
			if (error)
				return error;
		}

		tblkno += map.br_blockcount;
		tblkcnt -= map.br_blockcount;
	}

	return 0;
}

/*
 * Invalidate all of the "remote" value regions pointed to by a particular
 * leaf block.
 * Note that we must release the lock on the buffer so that we are not
 * caught holding something that the logging code wants to flush to disk.
 */
STATIC int
xfs_attr3_leaf_inactive(
	struct xfs_trans	**trans,
	struct xfs_inode	*dp,
	struct xfs_buf		*bp)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_attr_leaf_name_remote *name_rmt;
	struct xfs_attr_inactive_list *list;
	struct xfs_attr_inactive_list *lp;
	int			error;
	int			count;
	int			size;
	int			tmp;
	int			i;
	struct xfs_mount	*mp = bp->b_mount;

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);

	/*
	 * Count the number of "remote" value extents.
	 */
	count = 0;
	entry = xfs_attr3_leaf_entryp(leaf);
	for (i = 0; i < ichdr.count; entry++, i++) {
		if (be16_to_cpu(entry->nameidx) &&
		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			if (name_rmt->valueblk)
				count++;
		}
	}

	/*
	 * If there are no "remote" values, we're done.
	 */
	if (count == 0) {
		xfs_trans_brelse(*trans, bp);
		return 0;
	}
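
	/*
	 * The leaf buffer cannot stay locked while xfs_attr3_leaf_freextent()
	 * rolls transactions below, so collect the remote extents into a
	 * private list first, release the leaf, and only then invalidate
	 * their blocks.
	 */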

	/*
	 * Allocate storage for a list of all the "remote" value extents.
	 */
	size = count * sizeof(xfs_attr_inactive_list_t);
	list = kmem_alloc(size, 0);
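	/*
	 * kmem_alloc() without KM_MAYFAIL retries until it succeeds, so
	 * there is no NULL check on list.
	 */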

	/*
	 * Identify each of the "remote" value extents.
	 */
	lp = list;
	entry = xfs_attr3_leaf_entryp(leaf);
	for (i = 0; i < ichdr.count; entry++, i++) {
		if (be16_to_cpu(entry->nameidx) &&
		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			if (name_rmt->valueblk) {
				lp->valueblk = be32_to_cpu(name_rmt->valueblk);
				lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount,
						    be32_to_cpu(name_rmt->valuelen));
				lp++;
			}
		}
	}
	xfs_trans_brelse(*trans, bp);	/* unlock for trans. in freextent() */

	/*
	 * Invalidate each of the "remote" value extents.
	 */
	error = 0;
	for (lp = list, i = 0; i < count; i++, lp++) {
		tmp = xfs_attr3_leaf_freextent(trans, dp,
				lp->valueblk, lp->valuelen);

		if (error == 0)
			error = tmp;	/* save only the 1st errno */
	}

	kmem_free(list);
	return error;
}

/*
 * Recurse (gasp!) through the attribute nodes until we find leaves.
 * We're doing a depth-first traversal in order to invalidate everything.
 */
STATIC int
xfs_attr3_node_inactive(
	struct xfs_trans **trans,
	struct xfs_inode *dp,
	struct xfs_buf	*bp,
	int		level)
{
	xfs_da_blkinfo_t *info;
	xfs_da_intnode_t *node;
	xfs_dablk_t child_fsb;
	xfs_daddr_t parent_blkno, child_blkno;
	int error, i;
	struct xfs_buf *child_bp;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr ichdr;

	/*
	 * Since this code is recursive (gasp!) we must protect ourselves.
	 * A level beyond XFS_DA_NODE_MAXDEPTH means the btree is corrupt,
	 * so fail with -EIO rather than recurse any deeper.
	 */
	if (level > XFS_DA_NODE_MAXDEPTH) {
		xfs_trans_brelse(*trans, bp);	/* no locks for later trans */
		return -EIO;
	}
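
	/*
	 * The loop below pulls each child pointer out of this node in turn.
	 * The node buffer cannot stay locked across the child invalidations
	 * and transaction rolls, so it is released up front and re-read
	 * whenever the next child pointer is needed.
	 */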

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&ichdr, node);
	parent_blkno = bp->b_bn;
	if (!ichdr.count) {
		xfs_trans_brelse(*trans, bp);
		return 0;
	}
	btree = dp->d_ops->node_tree_p(node);
	child_fsb = be32_to_cpu(btree[0].before);
	xfs_trans_brelse(*trans, bp);	/* no locks for later trans */

	/*
	 * If this is the node level just above the leaves, simply loop
	 * over the leaves removing all of them.  If this is higher up
	 * in the tree, recurse downward.
	 */
	for (i = 0; i < ichdr.count; i++) {
		/*
		 * Read the subsidiary block to see what we have to work with.
		 * Don't do this in a transaction.  This is a depth-first
		 * traversal of the tree so we may deal with many blocks
		 * before we come back to this one.
		 */
		error = xfs_da3_node_read(*trans, dp, child_fsb, -1, &child_bp,
					  XFS_ATTR_FORK);
		if (error)
			return error;

		/* save for re-read later */
		child_blkno = XFS_BUF_ADDR(child_bp);

		/*
		 * Invalidate the subtree, however we have to.
		 */
		info = child_bp->b_addr;
		switch (info->magic) {
		case cpu_to_be16(XFS_DA_NODE_MAGIC):
		case cpu_to_be16(XFS_DA3_NODE_MAGIC):
			error = xfs_attr3_node_inactive(trans, dp, child_bp,
							level + 1);
			break;
		case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
		case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
			error = xfs_attr3_leaf_inactive(trans, dp, child_bp);
			break;
		default:
			error = -EIO;
			xfs_trans_brelse(*trans, child_bp);
			break;
		}
		if (error)
			return error;

		/*
		 * Remove the subsidiary block from the cache and from the log.
		 */
		error = xfs_da_get_buf(*trans, dp, 0, child_blkno, &child_bp,
				       XFS_ATTR_FORK);
		if (error)
			return error;
		xfs_trans_binval(*trans, child_bp);

		/*
		 * If we're not done, re-read the parent to get the next
		 * child block number.
		 */
		if (i + 1 < ichdr.count) {
			error = xfs_da3_node_read(*trans, dp, 0, parent_blkno,
						 &bp, XFS_ATTR_FORK);
			if (error)
				return error;
			node = bp->b_addr;
			btree = dp->d_ops->node_tree_p(node);
			child_fsb = be32_to_cpu(btree[i + 1].before);
			xfs_trans_brelse(*trans, bp);
		}
		/*
		 * Commit the invalidation of this child and roll to the
		 * next transaction.
		 */
		error = xfs_trans_roll_inode(trans, dp);
		if (error)
			return error;
	}

	return 0;
}

/*
 * Indiscriminately delete the entire attribute fork
 *
 * Recurse (gasp!) through the attribute nodes until we find leaves.
 * We're doing a depth-first traversal in order to invalidate everything.
 */
static int
xfs_attr3_root_inactive(
	struct xfs_trans	**trans,
	struct xfs_inode	*dp)
{
	struct xfs_da_blkinfo	*info;
	struct xfs_buf		*bp;
	xfs_daddr_t		blkno;
	int			error;

	/*
	 * Read block 0 to see what we have to work with.
	 * We only get here if we have extents; since we remove
	 * the extents in reverse order, the extent containing
	 * block 0 must still be there.
	 */
	error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
	if (error)
		return error;
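	/*
	 * Remember the root's disk address: the walk below releases bp, so
	 * the block has to be grabbed again afterwards to invalidate it.
	 */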
	blkno = bp->b_bn;

	/*
	 * Invalidate the tree, even if the "tree" is only a single leaf block.
	 * This is a depth-first traversal!
	 */
	info = bp->b_addr;
	switch (info->magic) {
	case cpu_to_be16(XFS_DA_NODE_MAGIC):
	case cpu_to_be16(XFS_DA3_NODE_MAGIC):
		error = xfs_attr3_node_inactive(trans, dp, bp, 1);
		break;
	case cpu_to_be16(XFS_ATTR_LEAF_MAGIC):
	case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC):
		error = xfs_attr3_leaf_inactive(trans, dp, bp);
		break;
	default:
		error = -EIO;
		xfs_trans_brelse(*trans, bp);
		break;
	}
	if (error)
		return error;

	/*
	 * Invalidate the incore copy of the root block.
	 */
	error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
	if (error)
		return error;
	xfs_trans_binval(*trans, bp);	/* remove from cache */
	/*
	 * Commit the invalidate and start the next transaction.
	 */
	error = xfs_trans_roll_inode(trans, dp);

	return error;
}

/*
 * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
 * removes both the on-disk and in-memory attribute fork. Note that this also
 * has to handle the condition of inodes without attributes but with an
 * attribute fork configured, so we can't use xfs_inode_hasattr() here.
 *
 * The in-memory attribute fork is removed even on error.
 */
int
xfs_attr_inactive(
	struct xfs_inode	*dp)
{
	struct xfs_trans	*trans;
	struct xfs_mount	*mp;
	int			lock_mode = XFS_ILOCK_SHARED;
	int			error = 0;

	mp = dp->i_mount;
	ASSERT(! XFS_NOT_DQATTACHED(mp, dp));

	xfs_ilock(dp, lock_mode);
	if (!XFS_IFORK_Q(dp))
		goto out_destroy_fork;
	xfs_iunlock(dp, lock_mode);

	lock_mode = 0;
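
	/*
	 * Allocating the transaction can block, e.g. waiting for log space,
	 * so it is done without the inode lock held; the attribute fork is
	 * then re-checked once the exclusive lock is taken below.
	 */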

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrinval, 0, 0, 0, &trans);
	if (error)
		goto out_destroy_fork;

	lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(dp, lock_mode);

	if (!XFS_IFORK_Q(dp))
		goto out_cancel;

	/*
	 * No need to make quota reservations here. We expect to release some
	 * blocks, not allocate, in the common case.
	 */
	xfs_trans_ijoin(trans, dp, 0);

	/*
	 * Invalidate and truncate the attribute fork extents. Make sure the
	 * fork actually has attributes as otherwise the invalidation has no
	 * blocks to read and returns an error. In this case, just do the fork
	 * removal below.
	 */
	if (xfs_inode_hasattr(dp) &&
	    dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr3_root_inactive(&trans, dp);
		if (error)
			goto out_cancel;

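		/*
		 * The walk above only invalidated the incore buffers; now
		 * actually free the attribute fork extents by truncating
		 * the fork down to nothing.
		 */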
		error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
		if (error)
			goto out_cancel;
	}

	/* Reset the attribute fork - this also destroys the in-core fork */
	xfs_attr_fork_remove(dp, trans);

	error = xfs_trans_commit(trans);
	xfs_iunlock(dp, lock_mode);
	return error;

out_cancel:
	xfs_trans_cancel(trans);
out_destroy_fork:
	/* kill the in-core attr fork before we drop the inode lock */
	if (dp->i_afp)
		xfs_idestroy_fork(dp, XFS_ATTR_FORK);
	if (lock_mode)
		xfs_iunlock(dp, lock_mode);
	return error;
}