/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_INODE_H
#define BTRFS_INODE_H

#include <linux/hash.h>
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
#include "delayed-inode.h"

/*
 * ordered_data_close is set by truncate when a file that used
 * to have good data has been truncated to zero.  When it is set
 * the btrfs file release call will add this inode to the
 * ordered operations list so that we make sure to flush out any
 * new data the application may have written before commit.
 */
enum {
	BTRFS_INODE_ORDERED_DATA_CLOSE,
	BTRFS_INODE_DUMMY,
	BTRFS_INODE_IN_DEFRAG,
	BTRFS_INODE_HAS_ASYNC_EXTENT,
	BTRFS_INODE_NEEDS_FULL_SYNC,
	BTRFS_INODE_COPY_EVERYTHING,
	BTRFS_INODE_IN_DELALLOC_LIST,
	BTRFS_INODE_READDIO_NEED_LOCK,
	BTRFS_INODE_HAS_PROPS,
	BTRFS_INODE_SNAPSHOT_FLUSH,
};
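
/*
 * These values index into btrfs_inode::runtime_flags (declared below) and
 * are manipulated with the regular atomic bitops.  A minimal sketch of the
 * pattern, mirroring the READDIO helpers at the bottom of this file
 * ('inode' stands for any struct inode backed by btrfs):
 *
 *	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
 *	if (test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
 *		     &BTRFS_I(inode)->runtime_flags))
 *		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
 *			  &BTRFS_I(inode)->runtime_flags);
 */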

/* in memory btrfs inode */
struct btrfs_inode {
	/* which subvolume this inode belongs to */
	struct btrfs_root *root;

	/* key used to find this inode on disk.  This is used by the code
	 * to read in roots of subvolumes
	 */
	struct btrfs_key location;

	/*
	 * Lock for counters and all fields used to determine if the inode is in
	 * the log or not (last_trans, last_sub_trans, last_log_commit,
	 * logged_trans).
	 */
	spinlock_t lock;

	/* the extent_tree has caches of all the extent mappings to disk */
	struct extent_map_tree extent_tree;

	/* the io_tree does range state (DIRTY, LOCKED etc) */
	struct extent_io_tree io_tree;

	/* special utility tree used to record which mirrors have already been
	 * tried when checksums fail for a given block
	 */
	struct extent_io_tree io_failure_tree;

	/* held while logging the inode in tree-log.c */
	struct mutex log_mutex;

	/* held while doing delalloc reservations */
	struct mutex delalloc_mutex;

	/* used to order data wrt metadata */
	struct btrfs_ordered_inode_tree ordered_tree;

	/* list of all the delalloc inodes in the FS.  There are times we need
	 * to write all the delalloc pages to disk, and this list is used
	 * to walk them all.
	 */
	struct list_head delalloc_inodes;

	/* node for the red-black tree that links inodes in subvolume root */
	struct rb_node rb_node;

	unsigned long runtime_flags;

	/* Keep track of who's O_SYNC/fsyncing currently */
	atomic_t sync_writers;

	/* full 64 bit generation number, struct vfs_inode doesn't have a big
	 * enough field for this.
	 */
	u64 generation;

	/*
	 * transid of the trans_handle that last modified this inode
	 */
	u64 last_trans;

	/*
	 * transid that last logged this inode
	 */
	u64 logged_trans;

	/*
	 * log transid when this inode was last modified
	 */
	int last_sub_trans;

	/* a local copy of root's last_log_commit */
	int last_log_commit;

	/* total number of bytes pending delalloc, used by stat to calc the
	 * real block usage of the file
	 */
	u64 delalloc_bytes;

	/*
	 * Total number of bytes pending delalloc that fall within a file
	 * range that is either a hole or beyond EOF (and no prealloc extent
	 * exists in the range). This is always <= delalloc_bytes.
	 */
	u64 new_delalloc_bytes;

	/*
	 * total number of bytes pending defrag, used by stat to check whether
	 * it needs COW.
	 */
	u64 defrag_bytes;

	/*
	 * the size of the file stored in the metadata on disk.  data=ordered
	 * means the in-memory i_size might be larger than the size on disk
	 * because not all the blocks are written yet.
	 */
	u64 disk_i_size;

	/*
	 * if this is a directory then index_cnt is the counter for the index
	 * number for new files that are created
	 */
	u64 index_cnt;

	/* Cache the directory index number to speed the dir/file remove */
	u64 dir_index;

	/* the fsync log has some corner cases that mean we have to check
	 * directories to see if any unlinks have been done before
	 * the directory was logged.  See tree-log.c for all the
	 * details
	 */
	u64 last_unlink_trans;

	/*
	 * Number of bytes outstanding that are going to need csums.  This is
	 * used in ENOSPC accounting.
	 */
	u64 csum_bytes;

	/* flags field from the on disk inode */
	u32 flags;
	/*
	 * Counter to keep track of the number of extent items we may use due
	 * to delalloc and such.  outstanding_extents is the number of extent
	 * items we think we'll end up using, and we reserve metadata space
	 * for that many items.
	 */
	unsigned outstanding_extents;

	struct btrfs_block_rsv block_rsv;

	/*
	 * Cached values of inode properties
	 */
	unsigned prop_compress;		/* per-file compression algorithm */
	/*
	 * Force compression on the file using the defrag ioctl, could be
	 * different from prop_compress and takes precedence if set
	 */
	unsigned defrag_compress;

	struct btrfs_delayed_node *delayed_node;

	/* File creation time. */
	struct timespec64 i_otime;

	/* Hook into fs_info->delayed_iputs */
	struct list_head delayed_iput;

	/*
	 * Used to avoid races between lockless (i_mutex not held) direct IO
	 * writes and concurrent fsync requests. Direct IO writes must acquire
	 * read access on this semaphore for creating an extent map and its
	 * corresponding ordered extent. The fast fsync path must acquire write
	 * access on this semaphore before it collects ordered extents and
	 * extent maps.
	 */
	struct rw_semaphore dio_sem;

	struct inode vfs_inode;
};

static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
{
	return container_of(inode, struct btrfs_inode, vfs_inode);
}
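
/*
 * A short usage sketch of the accessor pair: the VFS hands us a struct inode,
 * BTRFS_I() recovers the enclosing btrfs_inode, and &bi->vfs_inode goes the
 * other way ('file' below is a hypothetical struct file pointer):
 *
 *	struct btrfs_inode *bi = BTRFS_I(file_inode(file));
 *	struct inode *vfs = &bi->vfs_inode;
 */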

static inline unsigned long btrfs_inode_hash(u64 objectid,
					     const struct btrfs_root *root)
{
	u64 h = objectid ^ (root->root_key.objectid * GOLDEN_RATIO_PRIME);

#if BITS_PER_LONG == 32
	h = (h >> 32) ^ (h & 0xffffffff);
#endif

	return (unsigned long)h;
}

static inline void btrfs_insert_inode_hash(struct inode *inode)
{
	unsigned long h = btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root);

	__insert_inode_hash(inode, h);
}

static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
	u64 ino = inode->location.objectid;

	/*
	 * !ino: btree_inode
	 * type == BTRFS_ROOT_ITEM_KEY: subvol dir
	 */
	if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY)
		ino = inode->vfs_inode.i_ino;
	return ino;
}

static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
{
	i_size_write(&inode->vfs_inode, size);
	inode->disk_i_size = size;
}

static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;

	if (root == root->fs_info->tree_root &&
	    btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
		return true;
	if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
		return true;
	return false;
}

static inline bool is_data_inode(struct inode *inode)
{
	return btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID;
}

static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
						 int mod)
{
	lockdep_assert_held(&inode->lock);
	inode->outstanding_extents += mod;
	if (btrfs_is_free_space_inode(inode))
		return;
	trace_btrfs_inode_mod_outstanding_extents(inode->root, btrfs_ino(inode),
						  mod);
}
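
/*
 * Callers must hold btrfs_inode::lock, as enforced by the lockdep assertion
 * above.  A minimal sketch of the usual pattern; a negative 'mod' drops
 * extents that are no longer outstanding:
 *
 *	spin_lock(&inode->lock);
 *	btrfs_mod_outstanding_extents(inode, 1);
 *	spin_unlock(&inode->lock);
 */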

static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
	int ret = 0;

	spin_lock(&inode->lock);
	if (inode->logged_trans == generation &&
	    inode->last_sub_trans <= inode->last_log_commit &&
	    inode->last_sub_trans <= inode->root->last_log_commit) {
		/*
		 * After a ranged fsync we might have left some extent maps
		 * (that fall outside the fsync's range). So return false
		 * here if the list isn't empty, to make sure btrfs_log_inode()
		 * will be called and process those extent maps.
		 */
		smp_mb();
		if (list_empty(&inode->extent_tree.modified_extents))
			ret = 1;
	}
	spin_unlock(&inode->lock);
	return ret;
}
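
/*
 * Sketch of how an fsync path might use the check above to skip logging when
 * the inode is already fully covered by the running log transaction (locking
 * and the other skip conditions are omitted here):
 *
 *	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation))
 *		return 0;
 *
 * where returning 0 means there is nothing new to log.
 */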

#define BTRFS_DIO_ORIG_BIO_SUBMITTED	0x1

struct btrfs_dio_private {
	struct inode *inode;
	unsigned long flags;
	u64 logical_offset;
	u64 disk_bytenr;
	u64 bytes;
	void *private;

	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* IO errors */
	int errors;

	/* orig_bio is our btrfs_io_bio */
	struct bio *orig_bio;

	/* dio_bio came from fs/direct-io.c */
	struct bio *dio_bio;

	/*
	 * The original bio may be split into several sub-bios; this callback
	 * is invoked during the endio of those sub-bios.
	 */
	blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
			blk_status_t);
};

/*
 * Disable the DIO read nolock optimization, so new DIO readers are forced to
 * grab i_mutex. This is used to avoid truncate waiting endlessly because of
 * nonlocked DIO reads.
 */
static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode)
{
	set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
	smp_mb();
}

static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode)
{
	smp_mb__before_atomic();
	clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
}
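
/*
 * These two helpers are meant to be used as a pair around an operation that
 * must not race with lockless DIO reads, e.g. a truncate.  A sketch, assuming
 * the caller also drains in-flight DIO with inode_dio_wait():
 *
 *	btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
 *	inode_dio_wait(inode);
 *	... perform the truncate ...
 *	btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
 */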

/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define CSUM_FMT				"0x%*phN"
#define CSUM_FMT_VALUE(size, bytes)		size, bytes

static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_super_block *sb = root->fs_info->super_copy;
	const u16 csum_size = btrfs_super_csum_size(sb);

	/* Print the special 'negative' root objectids as signed values, which
	 * is more meaningful.
	 */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID)
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	else
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
}

#endif