/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

#include <trace/events/f2fs.h>

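/*
 * Mark an inode dirty for writeback, unless it is brand new or has already
 * been queued as a dirty f2fs inode.
 */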
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

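/* Propagate the on-disk F2FS_*_FL flags to the generic VFS S_* inode flags. */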
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (f2fs_encrypted_inode(inode))
		new_fl |= S_ENCRYPTED;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED);
}

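/*
 * Read the device number of a special inode from the on-disk inode: the old
 * encoding lives in the first address slot after any extra attributes, the
 * new encoding in the following slot.
 */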
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

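/*
 * Check whether the first data block of an inode has been written.
 * Returns 0 if it holds a valid block address, 1 if it does not, and
 * -EFAULT if the recorded address is out of range.
 */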
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
		return -EFAULT;
	return 0;
}

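/* Store the device number of a special inode in the on-disk inode. */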
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

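/*
 * If any inline data word is non-zero, mark the inode as having data and
 * dirty the inode page so the recovered status reaches disk.
 */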
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

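/*
 * Inode checksums require the feature bit, the extra attribute area, and
 * enough room in the inode for the checksum field.
 */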
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi->sb))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

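/*
 * Compute the inode checksum: seed with the ino and i_generation, then cover
 * the whole on-disk inode, substituting zero for the checksum field itself.
 */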
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

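/* Verify the on-disk inode checksum; warn and return false on a mismatch. */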
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_msg(sbi->sb, KERN_WARNING,
			"checksum invalid, ino = %x, %x vs. %x",
			ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

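/*
 * Sanity checks applied when an inode is read; on any inconsistency the
 * inode is rejected and SBI_NEED_FSCK is set so fsck can repair it.
 */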
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
			"run fsck to fix.",
			__func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode footer i_ino=%lx, ino,nid: "
			"[%u, %u] run fsck to fix.",
			__func__, inode->i_ino,
			ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: corrupted inode ino=%lx, run fsck to fix.",
			__func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi->sb)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) is with extra_attr, "
			"but extra_attr feature is off",
			__func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
			"max: %zu",
			__func__, inode->i_ino, fi->i_extra_isize,
			F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
							DATA_GENERIC))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: inode (ino=%lx) extent info [%u, %u, %u] "
				"is incorrect, run fsck to fix",
				__func__, inode->i_ino,
				ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_has_inline_data(inode) &&
			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx, mode=%u) should not have "
			"inline_data, run fsck to fix",
			__func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: inode (ino=%lx, mode=%u) should not have "
			"inline_dentry, run fsck to fix",
			__func__, inode->i_ino, inode->i_mode);
		return false;
	}

	return true;
}

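/* Fill the in-memory inode from its on-disk node page. */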
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->flags = 0;
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EINVAL;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}

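/*
 * Look up an inode in the inode cache, reading it from disk on a miss and
 * wiring up the address space and inode operations for its type.
 */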
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		inode_nohighmem(inode);
	} else if (S_ISLNK(inode->i_mode)) {
		if (f2fs_encrypted_inode(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

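/* Same as f2fs_iget(), but wait out congestion and retry on -ENOMEM. */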
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}

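/* Copy the in-memory inode state back into its on-disk node page. */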
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)->sb))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

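/*
 * Fetch the inode's node page and update it, retrying allocation failures;
 * any other failure except a missing node page stops checkpointing.
 */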
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);
		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

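/* Writeback entry point: sync a dirty inode into its node page. */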
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	/*
	 * We need to balance fs here to prevent producing dirty node pages
	 * during urgent cleaning when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	dquot_initialize(inode);

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		f2fs_update_inode_page(inode);
	dquot_free_inode(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0, if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error path:
		 * err && !f2fs_exist_written_data(sbi, inode->i_ino, ORPHAN_INO).
		 * In that case, f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * Clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * We must call this to avoid the inode remaining dirty, which would
	 * result in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so we can avoid losing this orphan when a
	 * checkpoint is followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}