/* SPDX-License-Identifier: GPL-2.0
 *
 * linux/drivers/staging/erofs/internal.h
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#ifndef __INTERNAL_H
#define __INTERNAL_H

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/cleancache.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "erofs_fs.h"

/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

#define errln(x, ...)   pr_err(x "\n", ##__VA_ARGS__)
#define infoln(x, ...)  pr_info(x "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__)

#define dbg_might_sleep         might_sleep
#define DBG_BUGON               BUG_ON
#else
#define debugln(x, ...)         ((void)0)

#define dbg_might_sleep()       ((void)0)
#define DBG_BUGON(...)          ((void)0)
#endif
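
/*
 * Usage sketch (illustrative only, not code from this file): in a debug
 * build the two statements below expand to might_sleep() and
 * BUG_ON(!page); without CONFIG_EROFS_FS_DEBUG both compile away to
 * ((void)0), so assertions in hot paths cost nothing in release builds:
 *
 *	dbg_might_sleep();
 *	DBG_BUGON(!page);
 */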

#ifdef CONFIG_EROFS_FAULT_INJECTION
enum {
	FAULT_KMALLOC,
	FAULT_MAX,
};

extern char *erofs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))

struct erofs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};
#endif

#ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
#define EROFS_FS_ZIP_CACHE_LVL	(2)
#elif defined(CONFIG_EROFS_FS_ZIP_CACHE_UNIPOLAR)
#define EROFS_FS_ZIP_CACHE_LVL	(1)
#else
#define EROFS_FS_ZIP_CACHE_LVL	(0)
#endif

#if (!defined(EROFS_FS_HAS_MANAGED_CACHE) && (EROFS_FS_ZIP_CACHE_LVL > 0))
#define EROFS_FS_HAS_MANAGED_CACHE
#endif

/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC   EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;

struct erofs_sb_info {
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	u32 blocks;
	u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
	u32 xattr_blkaddr;
#endif

	/* inode slot unit size in bit shift */
	unsigned char islotbits;
#ifdef CONFIG_EROFS_FS_ZIP
	/* cluster size in bit shift */
	unsigned char clusterbits;

	/* the dedicated workstation for compression */
	struct radix_tree_root workstn_tree;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
	struct inode *managed_cache;
#endif

#endif

	u32 build_time_nsec;
	u64 build_time;

	/* what we really care about is the nid rather than the ino */
	erofs_nid_t root_nid;
	/* used for statfs, f_files - f_favail */
	u64 inos;

	u8 uuid[16];                    /* 128-bit uuid for volume */
	u8 volume_name[16];             /* volume name */
	char *dev_name;

	unsigned int mount_opt;
	unsigned int shrinker_run_no;

#ifdef CONFIG_EROFS_FAULT_INJECTION
	struct erofs_fault_info fault_info;	/* For fault injection */
#endif
};

#ifdef CONFIG_EROFS_FAULT_INJECTION
#define erofs_show_injection_info(type)					\
	infoln("inject %s in %s of %pS", erofs_fault_name[type],	\
		__func__, __builtin_return_address(0))

static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
	struct erofs_fault_info *ffi = &sbi->fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#endif

static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
					size_t size, gfp_t flags)
{
#ifdef CONFIG_EROFS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		erofs_show_injection_info(FAULT_KMALLOC);
		return NULL;
	}
#endif
	return kmalloc(size, flags);
}
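
/*
 * Caller sketch (illustrative; "sbi", "buf" and the error path are
 * assumptions, not code from this driver): erofs_kmalloc() behaves like
 * plain kmalloc() except that it may deliberately return NULL once
 * FAULT_KMALLOC injection triggers, so callers must always handle
 * allocation failure:
 *
 *	buf = erofs_kmalloc(sbi, size, GFP_KERNEL);
 *	if (buf == NULL)
 *		return -ENOMEM;
 */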

#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020
#define EROFS_MOUNT_FAULT_INJECTION	0x00000040

#define clear_opt(sbi, option)	((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(sbi, option)	((sbi)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(sbi, option)	((sbi)->mount_opt & EROFS_MOUNT_##option)
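
/*
 * Usage sketch (illustrative only; the SB_POSIXACL line is an assumed
 * caller, not taken from this file): the option name is token-pasted
 * after EROFS_MOUNT_, so the helpers treat sbi->mount_opt as a plain
 * bitmask:
 *
 *	set_opt(sbi, XATTR_USER);	(sets EROFS_MOUNT_XATTR_USER)
 *	if (test_opt(sbi, POSIX_ACL))
 *		sb->s_flags |= SB_POSIXACL;
 *	clear_opt(sbi, FAULT_INJECTION);
 */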

#ifdef CONFIG_EROFS_FS_ZIP
#define erofs_workstn_lock(sbi)         xa_lock(&(sbi)->workstn_tree)
#define erofs_workstn_unlock(sbi)       xa_unlock(&(sbi)->workstn_tree)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
	/* the workgroup index in the workstation */
	pgoff_t index;

	/* overall workgroup reference count */
	atomic_t refcount;
};

#define EROFS_LOCKED_MAGIC     (INT_MIN | 0xE0F510CCL)

static inline bool erofs_workgroup_try_to_freeze(
	struct erofs_workgroup *grp, int v)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	if (v != atomic_cmpxchg(&grp->refcount,
		v, EROFS_LOCKED_MAGIC))
		return false;
	preempt_disable();
#else
	preempt_disable();
	if (atomic_read(&grp->refcount) != v) {
		preempt_enable();
		return false;
	}
#endif
	return true;
}

static inline void erofs_workgroup_unfreeze(
	struct erofs_workgroup *grp, int v)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	atomic_set(&grp->refcount, v);
#endif
	preempt_enable();
}
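
/*
 * Pairing sketch (illustrative only): a reclaim path freezes a
 * workgroup at an expected reference count to gain exclusive access
 * (lookups spin in erofs_workgroup_get() meanwhile), and the two calls
 * must always be paired with the same count:
 *
 *	if (erofs_workgroup_try_to_freeze(grp, 1)) {
 *		... dismantle the workgroup exclusively ...
 *		erofs_workgroup_unfreeze(grp, 1);
 *	}
 */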

/* returns 0 on success and -1 once the refcount has dropped to zero */
static inline int erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
{
	const int locked = (int)EROFS_LOCKED_MAGIC;
	int o;

repeat:
	o = atomic_read(&grp->refcount);

	/* spin if it is temporarily locked at the reclaim path */
	if (unlikely(o == locked)) {
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
		do
			cpu_relax();
		while (atomic_read(&grp->refcount) == locked);
#endif
		goto repeat;
	}

	if (unlikely(o <= 0))
		return -1;

	if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
		goto repeat;

	*ocnt = o;
	return 0;
}
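
/*
 * Usage sketch (illustrative; "oldcount" is a caller-side local): a
 * non-zero return means the refcount already dropped to zero and the
 * workgroup is about to be freed, so lookup paths typically retry the
 * whole lookup:
 *
 *	int oldcount;
 *
 *	if (erofs_workgroup_get(grp, &oldcount))
 *		goto repeat;	(raced with the final put)
 */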

#define __erofs_workgroup_get(grp)	atomic_inc(&(grp)->refcount)

extern int erofs_workgroup_put(struct erofs_workgroup *grp);

extern struct erofs_workgroup *erofs_find_workgroup(
	struct super_block *sb, pgoff_t index, bool *tag);

extern int erofs_register_workgroup(struct super_block *sb,
	struct erofs_workgroup *grp, bool tag);

extern unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
	unsigned long nr_shrink, bool cleanup);

static inline void erofs_workstation_cleanup_all(struct super_block *sb)
{
	erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
}

#ifdef EROFS_FS_HAS_MANAGED_CACHE
#define EROFS_UNALLOCATED_CACHED_PAGE	((void *)0x5F0EF00D)

extern int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
	struct erofs_workgroup *egrp);
extern int erofs_try_to_free_cached_page(struct address_space *mapping,
	struct page *page);
#endif

#endif

/* we strictly follow PAGE_SIZE and do not use buffer heads yet */
#define LOG_BLOCK_SIZE		PAGE_SHIFT

#undef LOG_SECTORS_PER_BLOCK
#define LOG_SECTORS_PER_BLOCK	(PAGE_SHIFT - 9)

#undef SECTORS_PER_BLOCK
#define SECTORS_PER_BLOCK	(1 << LOG_SECTORS_PER_BLOCK)

#define EROFS_BLKSIZ		(1 << LOG_BLOCK_SIZE)

#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
#error erofs cannot be used on this platform
#endif

#define ROOT_NID(sb)		((sb)->root_nid)

#ifdef CONFIG_EROFS_FS_ZIP
/* hard limit of pages per compressed cluster */
#define Z_EROFS_CLUSTER_MAX_PAGES       (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)

/* page count of a compressed cluster */
#define erofs_clusterpages(sbi)         ((1 << (sbi)->clusterbits) / PAGE_SIZE)
#endif

typedef u64 erofs_off_t;

/* data type for filesystem-wide block numbers */
typedef u32 erofs_blk_t;

#define erofs_blknr(addr)       ((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr)      ((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr)       ((erofs_off_t)(nr) * EROFS_BLKSIZ)

static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
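
/*
 * Worked example (illustrative numbers, not mandated by this header):
 * with 4KiB blocks, meta_blkaddr == 1 and islotbits == 5 (32-byte
 * inode slots), nid 100 resolves to byte offset
 * 1 * 4096 + (100 << 5) = 7296 on the block device.
 */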

#define inode_set_inited_xattr(inode)   (EROFS_V(inode)->flags |= 1)
#define inode_has_inited_xattr(inode)   (EROFS_V(inode)->flags & 1)

struct erofs_vnode {
	erofs_nid_t nid;
	unsigned int flags;

	unsigned char data_mapping_mode;
	/* inline size in bytes */
	unsigned char inode_isize;
	unsigned short xattr_isize;

	unsigned int xattr_shared_count;
	unsigned int *xattr_shared_xattrs;

	erofs_blk_t raw_blkaddr;

	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

#define EROFS_V(ptr)	\
	container_of(ptr, struct erofs_vnode, vfs_inode)

#define __inode_advise(x, bit, bits) \
	(((x) >> (bit)) & ((1 << (bits)) - 1))

#define __inode_version(advise)	\
	__inode_advise(advise, EROFS_I_VERSION_BIT,	\
		EROFS_I_VERSION_BITS)

#define __inode_data_mapping(advise)	\
	__inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\
		EROFS_I_DATA_MAPPING_BITS)

static inline unsigned long inode_datablocks(struct inode *inode)
{
	/* since i_size cannot be changed */
	return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
}

static inline bool is_inode_layout_plain(struct inode *inode)
{
	return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_PLAIN;
}

static inline bool is_inode_layout_compression(struct inode *inode)
{
	return EROFS_V(inode)->data_mapping_mode ==
					EROFS_INODE_LAYOUT_COMPRESSION;
}

static inline bool is_inode_layout_inline(struct inode *inode)
{
	return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_INLINE;
}

extern const struct super_operations erofs_sops;
extern const struct inode_operations erofs_dir_iops;
extern const struct file_operations erofs_dir_fops;

extern const struct address_space_operations erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ZIP
extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
#endif

/*
 * Logical to physical block mapping, used by erofs_map_blocks()
 *
 * Unlike other file systems, it is used in two access modes:
 *
 * 1) RAW access mode:
 *
 * Users pass a valid (m_lblk, m_lofs -- usually 0) pair
 * and get back the valid m_pblk, m_pofs and the longest m_len (in bytes).
 *
 * Note that m_lblk in the RAW access mode refers to the number of
 * the compressed ondisk block rather than the uncompressed
 * in-memory block for a compressed file.
 *
 * m_pofs equals m_lofs except for the inline data page.
 *
 * 2) Normal access mode:
 *
 * If the inode is not compressed, there is no difference from
 * the RAW access mode. However, if the inode is compressed,
 * users should pass a valid (m_lblk, m_lofs) pair and get
 * the needed m_pblk, m_pofs and m_len to fetch the compressed data,
 * plus the updated m_lblk and m_lofs which indicate the start
 * of the corresponding uncompressed data in the file.
 */
enum {
	BH_Zipped = BH_PrivateStart,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED	(1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		(1 << BH_Meta)
/* The extent has been compressed */
#define EROFS_MAP_ZIPPED	(1 << BH_Zipped)

struct erofs_map_blocks {
	erofs_off_t m_pa, m_la;
	u64 m_plen, m_llen;

	unsigned int m_flags;
};

/* Flags used by erofs_map_blocks() */
#define EROFS_GET_BLOCKS_RAW    0x0001
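
/*
 * Usage sketch (illustrative; "offset" is a caller-side byte offset):
 * fill in m_la and let erofs_map_blocks() return the physical extent;
 * EROFS_GET_BLOCKS_RAW selects the RAW access mode described above:
 *
 *	struct erofs_map_blocks map = { .m_la = offset };
 *	int err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
 *
 *	if (!err && (map.m_flags & EROFS_MAP_MAPPED))
 *		... read map.m_plen bytes starting at map.m_pa ...
 */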

/* data.c */
static inline struct bio *prepare_bio(
	struct super_block *sb,
	erofs_blk_t blkaddr, unsigned int nr_pages,
	bio_end_io_t endio)
{
	gfp_t gfp = GFP_NOIO;
	struct bio *bio = bio_alloc(gfp, nr_pages);

	if (unlikely(bio == NULL) &&
		(current->flags & PF_MEMALLOC)) {
		do {
			nr_pages /= 2;
			if (unlikely(!nr_pages)) {
				bio = bio_alloc(gfp | __GFP_NOFAIL, 1);
				BUG_ON(bio == NULL);
				break;
			}
			bio = bio_alloc(gfp, nr_pages);
		} while (bio == NULL);
	}

	bio->bi_end_io = endio;
	bio_set_dev(bio, sb->s_bdev);
	/* widen before shifting so large images don't overflow 32 bits */
	bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
	return bio;
}

static inline void __submit_bio(struct bio *bio, unsigned int op,
				unsigned int op_flags)
{
	bio_set_op_attrs(bio, op, op_flags);
	submit_bio(bio);
}
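
/*
 * Usage sketch (illustrative; "read_endio" is a caller-supplied
 * completion handler, not defined here): build a read bio for one
 * block, attach a page and submit it:
 *
 *	bio = prepare_bio(sb, blkaddr, 1, read_endio);
 *	err = bio_add_page(bio, page, PAGE_SIZE, 0);
 *	if (unlikely(err != PAGE_SIZE))
 *		goto err_out;
 *	__submit_bio(bio, REQ_OP_READ, 0);
 */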

extern struct page *erofs_get_meta_page(struct super_block *sb,
	erofs_blk_t blkaddr, bool prio);
extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
	struct page **, int);

struct erofs_map_blocks_iter {
	struct erofs_map_blocks map;
	struct page *mpage;
};

static inline struct page *erofs_get_inline_page(struct inode *inode,
	erofs_blk_t blkaddr)
{
	return erofs_get_meta_page(inode->i_sb,
		blkaddr, S_ISDIR(inode->i_mode));
}

/* inode.c */
extern struct inode *erofs_iget(struct super_block *sb,
	erofs_nid_t nid, bool dir);

/* dir.c */
int erofs_namei(struct inode *dir, struct qstr *name,
	erofs_nid_t *nid, unsigned int *d_type);

/* xattr.c */
#ifdef CONFIG_EROFS_FS_XATTR
extern const struct xattr_handler *erofs_xattr_handlers[];
#endif

/* symlink */
#ifdef CONFIG_EROFS_FS_XATTR
extern const struct inode_operations erofs_symlink_xattr_iops;
extern const struct inode_operations erofs_fast_symlink_xattr_iops;
extern const struct inode_operations erofs_special_inode_operations;
#endif

static inline void set_inode_fast_symlink(struct inode *inode)
{
#ifdef CONFIG_EROFS_FS_XATTR
	inode->i_op = &erofs_fast_symlink_xattr_iops;
#else
	inode->i_op = &simple_symlink_inode_operations;
#endif
}

static inline bool is_inode_fast_symlink(struct inode *inode)
{
#ifdef CONFIG_EROFS_FS_XATTR
	return inode->i_op == &erofs_fast_symlink_xattr_iops;
#else
	return inode->i_op == &simple_symlink_inode_operations;
#endif
}

static inline void *erofs_vmap(struct page **pages, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
	int i = 0;

	while (1) {
		void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);

		/* retry two more times (3 times in total) */
		if (addr != NULL || ++i >= 3)
			return addr;
		vm_unmap_aliases();
	}
#else
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
#endif
}

static inline void erofs_vunmap(const void *mem, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
	vm_unmap_ram(mem, count);
#else
	vunmap(mem);
#endif
}
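
/*
 * Pairing sketch (illustrative only; "pages"/"nrpages" are caller-side
 * names): erofs_vmap() and erofs_vunmap() must be called with the same
 * page count, since the vm_map_ram() backend needs "count" again to
 * tear the mapping down:
 *
 *	void *addr = erofs_vmap(pages, nrpages);
 *
 *	if (addr == NULL)
 *		return -ENOMEM;
 *	... use the virtually contiguous buffer at addr ...
 *	erofs_vunmap(addr, nrpages);
 */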

/* utils.c */
extern struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);

extern void erofs_register_super(struct super_block *sb);
extern void erofs_unregister_super(struct super_block *sb);

extern unsigned long erofs_shrink_count(struct shrinker *shrink,
	struct shrink_control *sc);
extern unsigned long erofs_shrink_scan(struct shrinker *shrink,
	struct shrink_control *sc);

#ifndef lru_to_page
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
#endif

#endif