/* SPDX-License-Identifier: GPL-2.0
 *
 * linux/drivers/staging/erofs/unzip_vle.h
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#ifndef __EROFS_FS_UNZIP_VLE_H
#define __EROFS_FS_UNZIP_VLE_H

#include "internal.h"
#include "unzip_pagevec.h"

/*
 *  - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
 * used for temporarily allocated pages (via erofs_allocpage),
 * in order to separate those from a NULL mapping (e.g. truncated pages)
 */
#define Z_EROFS_MAPPING_STAGING		((void *)0x5A110C8D)

#define z_erofs_is_stagingpage(page)	\
	((page)->mapping == Z_EROFS_MAPPING_STAGING)

static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool,
						 struct page *page)
{
	if (z_erofs_is_stagingpage(page)) {
		list_add(&page->lru, page_pool);
		return true;
	}
	return false;
}
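
/*
 * A minimal usage sketch (hypothetical caller, names are illustrative):
 * when a decompression pass drains its output pages, staging pages are
 * recycled into a local pool rather than handed back to the page cache:
 *
 *	LIST_HEAD(page_pool);
 *	...
 *	for (i = 0; i < nr_pages; ++i) {
 *		if (z_erofs_gather_if_stagingpage(&page_pool, pages[i]))
 *			continue;
 *		... hand pages[i] back to its owner ...
 *	}
 */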

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else.
 *
 * L: Protected by the work mutex (z_erofs_vle_work.lock).
 */

#define Z_EROFS_VLE_INLINE_PAGEVECS     3

struct z_erofs_vle_work {
	/* struct z_erofs_vle_work *left, *right; */

#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
	struct list_head list;

	atomic_t refcount;
#endif
	struct mutex lock;

	/* I: decompression offset in page */
	unsigned short pageofs;
	unsigned short nr_pages;

	/* L: queued pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: pagevec */
		erofs_vtptr_t pagevec[Z_EROFS_VLE_INLINE_PAGEVECS];
		struct rcu_head rcu;
	};
};

#define Z_EROFS_VLE_WORKGRP_FMT_PLAIN        0
#define Z_EROFS_VLE_WORKGRP_FMT_LZ4          1
#define Z_EROFS_VLE_WORKGRP_FMT_MASK         1

typedef struct z_erofs_vle_workgroup *z_erofs_vle_owned_workgrp_t;

struct z_erofs_vle_workgroup {
	struct erofs_workgroup obj;
	struct z_erofs_vle_work work;

	/* next owned workgroup */
	z_erofs_vle_owned_workgrp_t next;

	/* compressed pages (including multi-usage pages) */
	struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
	unsigned int llen, flags;
};

/* the chain markers below are chosen to avoid valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted I/O (still open) */
#define Z_EROFS_VLE_WORKGRP_TAIL        ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted I/O */
#define Z_EROFS_VLE_WORKGRP_TAIL_CLOSED ((void *)0x5F0EDEAD)

#define Z_EROFS_VLE_WORKGRP_NIL         (NULL)
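
/*
 * Illustrative sketch (not the exact submission loop) of walking a chain
 * of owned workgroups until a tail marker above is reached; owned_head is
 * a hypothetical local naming the chain's starting point:
 *
 *	z_erofs_vle_owned_workgrp_t owned = owned_head;
 *
 *	while (owned != Z_EROFS_VLE_WORKGRP_TAIL &&
 *	       owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
 *		struct z_erofs_vle_workgroup *grp = owned;
 *
 *		owned = grp->next;
 *		... submit I/O for grp ...
 *	}
 */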

#define z_erofs_vle_workgrp_fmt(grp)	\
	((grp)->flags & Z_EROFS_VLE_WORKGRP_FMT_MASK)

static inline void z_erofs_vle_set_workgrp_fmt(
	struct z_erofs_vle_workgroup *grp,
	unsigned int fmt)
{
	grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
}
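
/*
 * For illustration, the format bits round-trip through the two helpers
 * above: after
 *
 *	z_erofs_vle_set_workgrp_fmt(grp, Z_EROFS_VLE_WORKGRP_FMT_LZ4);
 *
 * z_erofs_vle_workgrp_fmt(grp) returns Z_EROFS_VLE_WORKGRP_FMT_LZ4,
 * and all bits outside Z_EROFS_VLE_WORKGRP_FMT_MASK stay untouched.
 */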

#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
#error multiref decompression is not implemented yet
#else

#define z_erofs_vle_grab_primary_work(grp)	(&(grp)->work)
#define z_erofs_vle_grab_work(grp, pageofs)	(&(grp)->work)
#define z_erofs_vle_work_workgroup(wrk, primary)	\
	((primary) ? container_of(wrk,	\
		struct z_erofs_vle_workgroup, work) : \
		({ BUG(); (void *)NULL; }))

#endif

#define Z_EROFS_WORKGROUP_SIZE       sizeof(struct z_erofs_vle_workgroup)

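/*
 * Descriptor for in-flight decompression I/O: pending_bios counts
 * submitted-but-incomplete bios, head points at the owned workgroup
 * chain, and u selects the completion style: a waitqueue for
 * synchronous (caller-handled) decompression or a work_struct for
 * asynchronous (workqueue-handled) decompression.
 */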
struct z_erofs_vle_unzip_io {
	atomic_t pending_bios;
	z_erofs_vle_owned_workgrp_t head;

	union {
		wait_queue_head_t wait;
		struct work_struct work;
	} u;
};

struct z_erofs_vle_unzip_io_sb {
	struct z_erofs_vle_unzip_io io;
	struct super_block *sb;
};

#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * An online page is packed as (sub-index << COUNT_BITS) | waiters:
 * waiters (a.k.a. ongoing_packs) is the number of outstanding
 * completions needed before the page can be unlocked;
 * sub-index is 0 for a partial page and >= 1 for full pages.
 */
typedef atomic_t z_erofs_onlinepage_t;
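
/*
 * Worked example with Z_EROFS_ONLINEPAGE_COUNT_BITS == 2: the value
 * (5 << 2) | 3 == 0x17 encodes sub-index 5 with 3 outstanding
 * completions; each z_erofs_onlinepage_endio() call below drops the
 * low 2-bit counter by one, and the page is unlocked when it hits 0.
 */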

/* type punning */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};

static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	BUG_ON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}

static inline void z_erofs_onlinepage_init(struct page *page)
{
	/* keep the page from being unlocked in advance */
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	unsigned long *p, o, v, id;
repeat:
	p = &page_private(page);
	o = READ_ONCE(*p);

	id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (id) {
		if (!index)
			return;

		BUG_ON(id != index);
	}

	v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (cmpxchg(p, o, v) != o)
		goto repeat;
}
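
/*
 * Illustrative value sequence: right after z_erofs_onlinepage_init()
 * the private word is 1 (sub-index 0, one waiter); calling
 * z_erofs_onlinepage_fixup(page, 2, true) stores (2 << 2) | 2 == 10,
 * i.e. sub-index 2 with two waiters, so two z_erofs_onlinepage_endio()
 * calls are then needed to unlock the page.
 */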

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	BUG_ON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}

	debugln("%s, page %p value %x", __func__, page, atomic_read(u.o));
}

#define Z_EROFS_VLE_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VLE_VMAP_GLOBAL_PAGES	2048
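
/*
 * Worked example (assuming 16 KiB kernel stacks and 8-byte pointers):
 * THREAD_SIZE / 8 / sizeof(struct page *) == 16384 / 8 / 8 == 256, so
 * the 96U cap applies and at most 96 page pointers (768 bytes, well
 * under the 1/8-of-stack budget) are kept on-stack for vmap.
 */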

/* unzip_vle_lz4.c */
extern int z_erofs_vle_plain_copy(struct page **compressed_pages,
	unsigned int clusterpages, struct page **pages,
	unsigned int nr_pages, unsigned short pageofs);

extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
	unsigned int clusterpages, struct page **pages,
	unsigned int outlen, unsigned short pageofs,
	void (*endio)(struct page *));

extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
	unsigned int clusterpages, void *vaddr, unsigned int llen,
	unsigned short pageofs, bool overlapped);

#endif