/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H

#include "internal.h"
#include "zpvec.h"

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_NR_INLINE_PAGEVECS      3

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pageset lock;
 *
 * A: Field should be accessed / updated atomically for parallelized code.
 */
struct z_erofs_collection {
	struct mutex lock;

	/* I: page offset of the start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};

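/*
 * A minimal sketch of the "L" rule above (illustrative only, not a
 * quote from the actual callers): fields such as nr_pages may only be
 * touched with the collection lock held, e.g.
 *
 *	mutex_lock(&cl->lock);
 *	cl->nr_pages = max_t(unsigned short, cl->nr_pages, nr);
 *	mutex_unlock(&cl->lock);
 *
 * while "I" fields like pageofs are set up once at initialization and
 * can then be read without any locking.
 */
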
#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1

/*
 * let's leave a type here in case another tagged pointer
 * needs to be introduced later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: points to the next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* A: lower limit of decompressed length and whether it's the full length */
	unsigned int length;

	/* I: physical cluster size in pages */
	unsigned short pclusterpages;

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* A: compressed pages (can be cached or in-place pages) */
	struct page *compressed_pages[];
};

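/*
 * Illustrative note on the "length" encoding above (a sketch, not a
 * quote from the decompressor): the known decompressed length is
 * packed together with the FULL_LENGTH flag, e.g.
 *
 *	decompressed bytes = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
 *	fully decoded      = pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH;
 *
 * so the stored value can only grow until the full length is known.
 */
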
#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)

/* let's avoid valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted I/O yet (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted its I/O */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

#define Z_EROFS_PCLUSTER_NIL            (NULL)

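/*
 * Illustrative summary of the chain states (a sketch, not actual
 * traversal code): a pcluster's "next" field is one of
 *
 *	Z_EROFS_PCLUSTER_NIL		not chained to anything;
 *	Z_EROFS_PCLUSTER_TAIL		tail of an open chain, more
 *					pclusters may still be attached;
 *	Z_EROFS_PCLUSTER_TAIL_CLOSED	tail of a chain whose I/O has
 *					already been submitted;
 *	<a kernel pointer>		the next pcluster in the chain.
 *
 * The two magic values deliberately fall outside the valid kernel
 * address range, so they can never be confused with real pointers.
 */
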
struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		/* for synchronous decompression: wait on completion */
		wait_queue_head_t wait;
		/* for asynchronous decompression: deferred to a workqueue */
		struct work_struct work;
	} u;
};

#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
					 struct page *page)
{
	return page->mapping == MNGD_MAPPING(sbi);
}

#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (aka. ongoing_packs): # of references to drop before the
 *    page can be unlocked;
 * sub-index: 0 for a partial page, >= 1 for each full-page sub-index.
 */
typedef atomic_t z_erofs_onlinepage_t;

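/*
 * Worked example of the layout above (illustrative only): with
 * COUNT_BITS == 2, the atomic value (5 << 2) | 1 describes a full
 * page with sub-index 5 that still has one pending reference; the
 * low COUNT_MASK bits hold the reference count and the remaining
 * high bits hold the sub-index.
 */
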
/* type punning */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};

static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* keep the page from being unlocked prematurely */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		/* the sub-index, once set, can never change */
		if (!index)
			return;

		DBG_BUGON(orig_index != index);
	}

	/* repack the sub-index and take an extra reference if @down is set */
	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	/* dropping the last reference unlocks the page */
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}

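/*
 * Illustrative lifecycle of an online page (a sketch of hypothetical
 * caller code, not a quote from zdata.c):
 *
 *	z_erofs_onlinepage_init(page);			// 1 reference
 *	z_erofs_onlinepage_fixup(page, index, true);	// +1 reference
 *	...
 *	z_erofs_onlinepage_endio(page);			// drop a reference
 *	z_erofs_onlinepage_endio(page);			// last drop unlocks
 */
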
#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048
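/*
 * Worked example (illustrative): the on-stack budget is 1/8 of the
 * kernel stack, so with THREAD_SIZE == 16 KiB and 8-byte pointers it
 * would allow 16384 / 8 / 8 = 256 page pointers, which the 96U cap
 * then clamps down to 96.
 */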

#endif