// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle_lz4.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "unzip_vle.h"

#if Z_EROFS_CLUSTER_MAX_PAGES > Z_EROFS_VLE_INLINE_PAGEVECS
#define EROFS_PERCPU_NR_PAGES	Z_EROFS_CLUSTER_MAX_PAGES
#else
#define EROFS_PERCPU_NR_PAGES	Z_EROFS_VLE_INLINE_PAGEVECS
#endif

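/*
 * Per-CPU scratch buffers used as bounce/output space for decompression.
 * Each buffer covers the larger of a full compressed cluster and the
 * inline pagevec count; users run with preemption disabled so that the
 * current CPU keeps exclusive use of its slot.
 */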
static struct {
	char data[PAGE_SIZE * EROFS_PERCPU_NR_PAGES];
} erofs_pcpubuf[NR_CPUS];

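/*
 * Copy a "plain" (stored, cluster-offset shifted) cluster into the target
 * pages: destination page i gets the last pageofs bytes of compressed page
 * i - 1 followed by the first righthalf bytes of compressed page i.
 * A destination page may alias one of the compressed pages (in-place I/O),
 * so such pages are first mirrored into the per-CPU buffer and read back
 * from there once the original has been overwritten.
 */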
int z_erofs_vle_plain_copy(struct page **compressed_pages,
			   unsigned clusterpages,
			   struct page **pages,
			   unsigned nr_pages,
			   unsigned short pageofs)
{
	unsigned i, j;
	void *src = NULL;
	const unsigned righthalf = PAGE_SIZE - pageofs;
	char *percpu_data;
	bool mirrored[Z_EROFS_CLUSTER_MAX_PAGES] = { 0 };

	preempt_disable();
	percpu_data = erofs_pcpubuf[smp_processor_id()].data;

	j = 0;
	for (i = 0; i < nr_pages; j = i++) {
		struct page *page = pages[i];
		void *dst;

		if (page == NULL) {
			if (src != NULL) {
				if (!mirrored[j])
					kunmap_atomic(src);
				src = NULL;
			}
			continue;
		}

		dst = kmap_atomic(page);

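		/*
		 * If this destination page is also one of the compressed
		 * pages, snapshot its current contents into the per-CPU
		 * buffer before it is overwritten below.
		 */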
		for (; j < clusterpages; ++j) {
			if (compressed_pages[j] != page)
				continue;

			BUG_ON(mirrored[j]);
			memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
			mirrored[j] = true;
			break;
		}

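		/*
		 * The first pageofs bytes of page i come from the end of
		 * compressed page i - 1 (or its mirrored copy).
		 */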
		if (i) {
			if (src == NULL)
				src = mirrored[i-1] ?
					percpu_data + (i-1) * PAGE_SIZE :
					kmap_atomic(compressed_pages[i-1]);

			memcpy(dst, src + righthalf, pageofs);

			if (!mirrored[i-1])
				kunmap_atomic(src);

			if (unlikely(i >= clusterpages)) {
				kunmap_atomic(dst);
				break;
			}
		}

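		/*
		 * The remaining righthalf bytes come from the beginning of
		 * compressed page i (or its mirrored copy).
		 */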
		if (!righthalf)
			src = NULL;
		else {
			src = mirrored[i] ? percpu_data + i * PAGE_SIZE :
				kmap_atomic(compressed_pages[i]);

			memcpy(dst + pageofs, src, righthalf);
		}

		kunmap_atomic(dst);
	}

	if (src != NULL && !mirrored[j])
		kunmap_atomic(src);

	preempt_enable();
	return 0;
}

extern int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen);

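/*
 * Decompress a whole cluster into the per-CPU buffer, then copy the result
 * page by page into the destination pages and complete each one via endio().
 * Returns -ENOTSUPP when the output would not fit into the per-CPU buffer,
 * so that the caller can fall back to the vmap-based path.
 */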
int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
				  unsigned clusterpages,
				  struct page **pages,
				  unsigned outlen,
				  unsigned short pageofs,
				  void (*endio)(struct page *))
{
	void *vin, *vout;
	unsigned nr_pages, i, j;
	int ret;

	if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
		return -ENOTSUPP;

	nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);

	if (clusterpages == 1)
		vin = kmap_atomic(compressed_pages[0]);
	else
		vin = erofs_vmap(compressed_pages, clusterpages);

	preempt_disable();
	vout = erofs_pcpubuf[smp_processor_id()].data;

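	/* decompress the whole cluster into the per-CPU output buffer */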
	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, outlen);

	if (ret >= 0) {
		outlen = ret;
		ret = 0;
	}

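	/*
	 * Copy the decompressed data back page by page; a destination page
	 * that is also the single compressed page is written through its
	 * existing kmap (vin) instead of being mapped again.
	 */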
	for (i = 0; i < nr_pages; ++i) {
		j = min((unsigned)PAGE_SIZE - pageofs, outlen);

		if (pages[i] != NULL) {
			if (ret < 0)
				SetPageError(pages[i]);
			else if (clusterpages == 1 && pages[i] == compressed_pages[0])
				memcpy(vin + pageofs, vout + pageofs, j);
			else {
				void *dst = kmap_atomic(pages[i]);

				memcpy(dst + pageofs, vout + pageofs, j);
				kunmap_atomic(dst);
			}
			endio(pages[i]);
		}
		vout += PAGE_SIZE;
		outlen -= j;
		pageofs = 0;
	}
	preempt_enable();

	if (clusterpages == 1)
		kunmap_atomic(vin);
	else
		erofs_vunmap(vin, clusterpages);

	return ret;
}

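/*
 * Decompress a cluster into a caller-provided virtual mapping (vout).
 * If the compressed pages overlap that mapping, their contents are bounced
 * through the per-CPU buffer first; otherwise they are mapped directly
 * (kmap_atomic for a single page, erofs_vmap for several).
 */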
int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
			   unsigned clusterpages,
			   void *vout,
			   unsigned llen,
			   unsigned short pageofs,
			   bool overlapped)
{
	void *vin;
	unsigned i;
	int ret;

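	/*
	 * The compressed pages overlap the output mapping, so copy them
	 * into the per-CPU buffer before decompression.
	 */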
	if (overlapped) {
		preempt_disable();
		vin = erofs_pcpubuf[smp_processor_id()].data;

		for (i = 0; i < clusterpages; ++i) {
			void *t = kmap_atomic(compressed_pages[i]);

			memcpy(vin + PAGE_SIZE * i, t, PAGE_SIZE);
			kunmap_atomic(t);
		}
	} else if (clusterpages == 1)
		vin = kmap_atomic(compressed_pages[0]);
	else {
		vin = erofs_vmap(compressed_pages, clusterpages);
	}

	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, llen);
	if (ret > 0)
		ret = 0;

	if (!overlapped) {
		if (clusterpages == 1)
			kunmap_atomic(vin);
		else {
			erofs_vunmap(vin, clusterpages);
		}
	} else
		preempt_enable();

	return ret;
}