// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
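
/*
 * Fallback in-place decompression margin below matches the definition in
 * the userspace LZ4 library: e.g., a 64KiB pcluster requires
 * (65536 >> 8) + 32 = 288 spare bytes past the compressed data.
 */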
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif

struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

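/*
 * Parse the on-disk LZ4 configuration: @lz4 is the per-algorithm config
 * (NULL on images without one, where only the legacy lz4_max_distance
 * field in the superblock is available).  The parsed limits are cached in
 * the sb_info and the per-CPU buffers are grown accordingly.
 */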
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen with moderate
 * compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

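		/* reuse a spared short-lived page, or allocate a new bounce page */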
		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
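	/* 1: all output pages are physically consecutive, 0: otherwise */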
	return kaddr ? 1 : 0;
}

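/*
 * Decide how the compressed input will be accessed and record it in
 * *maptype:
 *   0 - use the single input page that is already kmapped;
 *   1 - vmap all input pages into one contiguous virtual area;
 *   2 - copy the (potentially overlapping) input to a per-CPU buffer.
 */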
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i, j;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		for (i = 0; i < ctx->inpages; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

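	/* no overlap risk: use the kmapped page directly or vmap all input pages */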
	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* otherwise, copy the compressed data (which may overlap the output) to a per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

/*
 * Get the exact inputsize with the zero_padding feature.
 * - For LZ4, it should work if the zero_padding feature is on (5.3+);
 * - For MicroLZMA, it's enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}

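/*
 * Decompress the whole pcluster into the virtually contiguous buffer @out.
 * The partial LZ4 decoder is used whenever the exact compressed size is
 * unknown (no zero_padding) or only part of the output is needed.
 */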
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *out)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);

	/* in-place LZ4 decompression is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      EROFS_BLKSIZ - rq->pageofs_in));
		if (ret) {
			kunmap_atomic(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(EROFS_BLKSIZ - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_atomic(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

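/*
 * dst_maptype: 0 - single output page kmapped directly;
 *              1 - output pages are physically consecutive (page_address);
 *              2 - discontiguous output pages vmapped into one area.
 */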
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* one optimized fast path only for non-bigpcluster cases for now */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}

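/*
 * Handle uncompressed (plain) pclusters by copying: "interlaced" data keeps
 * the output's in-page offset within the input page, whereas "shifted" data
 * does not.
 */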
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
				   struct page **pagepool)
{
	const unsigned int inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
	const unsigned int outpages =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = min_t(unsigned int, rq->outputsize,
					     PAGE_SIZE - rq->pageofs_out);
	const unsigned int lefthalf = rq->outputsize - righthalf;
	const unsigned int interlaced_offset =
		rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 0 : rq->pageofs_out;
	unsigned char *src, *dst;

	if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

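	/* the only input page is reused as the output page: already in place */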
	if (rq->out[0] == *rq->in) {
		DBG_BUGON(rq->pageofs_out);
		return 0;
	}

	src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in;
	if (rq->out[0]) {
		dst = kmap_local_page(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src + interlaced_offset,
		       righthalf);
		kunmap_local(dst);
	}

	if (outpages > inpages) {
		DBG_BUGON(!rq->out[outpages - 1]);
		if (rq->out[outpages - 1] != rq->in[inpages - 1]) {
			dst = kmap_local_page(rq->out[outpages - 1]);
			memcpy(dst, interlaced_offset ? src :
					(src + righthalf), lefthalf);
			kunmap_local(dst);
		} else if (!interlaced_offset) {
			memmove(src, src + righthalf, lefthalf);
		}
	}
	kunmap_local(src);
	return 0;
}

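/* indexed by the on-disk Z_EROFS_COMPRESSION_* algorithm ids */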
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_transform_plain,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_INTERLACED] = {
		.decompress = z_erofs_transform_plain,
		.name = "interlaced"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
};

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct page **pagepool)
{
	return decompressors[rq->alg].decompress(rq, pagepool);
}