// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/data.c
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

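/*
 * read_endio() is the completion callback shared by all raw read bios
 * built in this file: it marks each page uptodate (or errored) and then
 * unlocks it so that sleeping readers may proceed.
 */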
static inline void read_endio(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	const blk_status_t err = bio->bi_status;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		BUG_ON(PageUptodate(page));

		if (unlikely(err))
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

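/*
 * erofs_get_meta_page() reads one metadata block through the block device
 * mapping. Allocation uses __GFP_NOFAIL, so the page cache lookup below
 * never returns NULL; on success the page comes back locked with an extra
 * reference, and on a read failure ERR_PTR(-EIO) is returned instead.
 *
 * A minimal caller sketch (hypothetical, for illustration only):
 *
 *	page = erofs_get_meta_page(sb, blkaddr, false);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	ptr = kmap(page);
 *	... parse on-disk metadata at ptr ...
 *	kunmap(page);
 *	unlock_page(page);
 *	put_page(page);
 */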
/* prio: pass true for directory metadata so the read carries REQ_PRIO */
struct page *erofs_get_meta_page(struct super_block *sb,
	erofs_blk_t blkaddr, bool prio)
{
	struct inode *bd_inode = sb->s_bdev->bd_inode;
	struct address_space *mapping = bd_inode->i_mapping;
	struct page *page;

repeat:
	page = find_or_create_page(mapping, blkaddr,
	/*
	 * Prefer looping in the allocator rather than here,
	 * at least that code knows what it's doing.
	 */
		mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL);

	BUG_ON(!page || !PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;
		int err;

		bio = prepare_bio(sb, blkaddr, 1, read_endio);
		err = bio_add_page(bio, page, PAGE_SIZE, 0);
		BUG_ON(err != PAGE_SIZE);

		__submit_bio(bio, REQ_OP_READ,
			REQ_META | (prio ? REQ_PRIO : 0));

		lock_page(page);

		/* has the page been truncated by others? */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (unlikely(!PageUptodate(page))) {
			unlock_page(page);
			put_page(page);

			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

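/*
 * Flat (uncompressed) inodes occupy one contiguous on-disk extent starting
 * at blknr_to_addr(vi->raw_blkaddr); inodes with an inline tail keep their
 * last block right after the on-disk inode and xattrs instead. The helper
 * below translates a logical offset (m_la) into that physical layout.
 *
 * Worked example (hypothetical numbers, 4KiB blocks): with raw_blkaddr 100,
 * logical offset m_la = 0x1800 maps to
 *	m_pa = 100 * 4096 + 0x1800 = 0x65800
 * with m_plen covering the remainder of the non-inline blocks.
 */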
static int erofs_map_blocks_flatmode(struct inode *inode,
	struct erofs_map_blocks *map,
	int flags)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_vnode *vi = EROFS_V(inode);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
	BUG_ON(is_inode_layout_compression(inode));

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - is_inode_layout_inline(inode);

	if (unlikely(offset >= inode->i_size)) {
		/* leave out-of-bound accesses unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there are no holes in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (is_inode_layout_inline(inode)) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data must lie within a single meta block */
		BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
			vi->nid, inode->i_size, map->m_la);
		BUG();
	}

out:
	map->m_llen = map->m_plen;
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return 0;
}

#ifdef CONFIG_EROFS_FS_ZIP
extern int z_erofs_map_blocks_iter(struct inode *,
	struct erofs_map_blocks *, struct page **, int);
#endif

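/*
 * erofs_map_blocks_iter() keeps the currently mapped meta page alive
 * across calls via *mpage_ret so that compressed-index lookups can avoid
 * re-reading the same block; for non-compressed inodes any cached page is
 * dropped and the request falls back to plain erofs_map_blocks().
 */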
int erofs_map_blocks_iter(struct inode *inode,
	struct erofs_map_blocks *map,
	struct page **mpage_ret, int flags)
{
	/* by default, reading raw data never uses erofs_map_blocks_iter */
	if (unlikely(!is_inode_layout_compression(inode))) {
		if (*mpage_ret != NULL)
			put_page(*mpage_ret);
		*mpage_ret = NULL;

		return erofs_map_blocks(inode, map, flags);
	}

#ifdef CONFIG_EROFS_FS_ZIP
	return z_erofs_map_blocks_iter(inode, map, mpage_ret, flags);
#else
	/* data compression is not available */
	return -ENOTSUPP;
#endif
}

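/*
 * erofs_map_blocks() is the non-iterative wrapper: for compressed inodes
 * it lets erofs_map_blocks_iter() cache a meta page internally and then
 * releases that page before returning.
 */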
int erofs_map_blocks(struct inode *inode,
	struct erofs_map_blocks *map, int flags)
{
	if (unlikely(is_inode_layout_compression(inode))) {
		struct page *mpage = NULL;
		int err;

		err = erofs_map_blocks_iter(inode, map, &mpage, flags);
		if (mpage != NULL)
			put_page(mpage);
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

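/*
 * erofs_read_raw_page() either adds @page to @bio (when the page is
 * physically contiguous with the last queued block) or submits the old
 * bio and opens a new one. It returns the bio to keep filling, NULL once
 * the page has been completed without an open bio (hole, inline tail,
 * cleancache hit or a just-submitted full bio), or an ERR_PTR on failure.
 */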
static inline struct bio *erofs_read_raw_page(
	struct bio *bio,
	struct address_space *mapping,
	struct page *page,
	erofs_off_t *last_block,
	unsigned nblocks,
	bool ra)
{
	struct inode *inode = mapping->host;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	BUG_ON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	if (cleancache_get_page(page) == 0) {
		err = 0;
		SetPageUptodate(page);
		goto has_updated;
	}

	/* note that bio is also NULL in the ->readpage case */
	if (bio != NULL &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		__submit_bio(bio, REQ_OP_READ, 0);
		bio = NULL;
	}

	if (bio == NULL) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (unlikely(err))
			goto err_out;

		/* zero out the page lying in a hole */
		if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* err is implicitly 0 here, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		BUG_ON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			BUG_ON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr, false);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* err is implicitly 0 here, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		BUG_ON(erofs_blkoff(map.m_pa) != 0);

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = prepare_bio(inode->i_sb, blknr, nblocks, read_endio);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit in advance in case it is followed by too many gaps */
	if (unlikely(bio->bi_vcnt >= bio->bi_max_vecs)) {
		/* err needs to be reset to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set the page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if the page was updated manually, the contiguous pages have a gap */
	if (bio != NULL)
submit_bio_out:
		__submit_bio(bio, REQ_OP_READ, 0);

	return unlikely(err) ? ERR_PTR(err) : NULL;
}

/*
 * Since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
		page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	BUG_ON(bio != NULL);	/* since we have only one bio -- must be NULL */
	return 0;
}

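/*
 * ->readpages hands over the pages in reverse index order on the LRU
 * list, so taking entries from pages->prev walks them in ascending file
 * offset order, which is exactly what the contiguous-bio logic in
 * erofs_read_raw_page() expects.
 */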
static int erofs_raw_access_readpages(struct file *filp,
	struct address_space *mapping,
	struct list_head *pages, unsigned nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
				&last_block, nr_pages, true);

			/* all page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
					__func__, page->index,
					EROFS_V(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* the page could still be locked */
		put_page(page);
	}
	BUG_ON(!list_empty(pages));

	/* the rare case (the batch ends in a gap) */
	if (unlikely(bio != NULL))
		__submit_bio(bio, REQ_OP_READ, 0);
	return 0;
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
};