// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 */
#include "internal.h"
#include <linux/dax.h>
#include <trace/events/erofs.h>
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap(buf->page);
	else if (buf->kmap_type == EROFS_KMAP_ATOMIC)
		kunmap_atomic(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}
void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}
void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
		  erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	struct address_space *const mapping = inode->i_mapping;
	erofs_off_t offset = blknr_to_addr(blkaddr);
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);
		page = read_cache_page_gfp(mapping, index,
				mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(page))
			return page;
		/* should already be PageUptodate, no need to lock page */
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap(page);
		else if (type == EROFS_KMAP_ATOMIC)
			buf->base = kmap_atomic(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	if (erofs_is_fscache_mode(sb))
		return erofs_bread(buf, EROFS_SB(sb)->s_fscache->inode,
				   blkaddr, type);

	return erofs_bread(buf, sb->s_bdev->bd_inode, blkaddr, type);
}
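/*
 * Usage sketch (added; not part of the original file): the expected
 * calling pattern for the metabuf helpers above. example_read_meta() is
 * a hypothetical name; the helpers and __EROFS_BUF_INITIALIZER are real
 * erofs interfaces.
 */
static int example_read_meta(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *ptr = erofs_read_metabuf(&buf, sb, blkaddr, EROFS_KMAP);

	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	/* ... parse the on-disk metadata at ptr ... */
	erofs_put_metabuf(&buf);	/* kunmap + put_page in one call */
	return 0;
}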
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > EROFS_BLKSIZ) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}
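/*
 * Worked example (illustrative numbers, added): with EROFS_BLKSIZ == 4096,
 * a FLAT_INLINE inode of i_size 10000 gives nblocks = 3 and lastblk = 2.
 * An m_la below 8192 maps into the raw block area at raw_blkaddr, while
 * m_la == 9000 takes the tail-packing branch: m_plen = 1000 and the extent
 * is flagged EROFS_MAP_META inside the inode's metadata block.
 */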
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map, flags);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, flags, 0);
	return err;
}
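/*
 * Usage sketch (added): resolving the logical extent that contains @pos,
 * mirroring what erofs_iomap_begin() does below. example_lookup() is a
 * hypothetical name.
 */
static int example_lookup(struct inode *inode, erofs_off_t pos)
{
	struct erofs_map_blocks map = { .m_la = pos, .m_llen = 1 };
	int err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);

	if (err)
		return err;
	if (!(map.m_flags & EROFS_MAP_MAPPED))
		return 0;	/* hole or post-EOF access */
	/* [map.m_pa, map.m_pa + map.m_plen) is the physical extent */
	return 0;
}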
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	/* primary device by default */
	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		map->m_bdev = dif->bdev;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = blknr_to_addr(dif->mapped_blkaddr);
			length = blknr_to_addr(dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}
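/*
 * Illustrative layout (made-up numbers, added): with one extra device at
 * mapped_blkaddr == 1024 spanning 2048 blocks, any m_pa inside
 * [blknr_to_addr(1024), blknr_to_addr(3072)) hits that window and is
 * rebased to a device-local address by subtracting blknr_to_addr(1024).
 */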
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(inode->i_sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, inode->i_sb,
					 erofs_blknr(mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}
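/*
 * The iomap ops table tying the two callbacks above together
 * (reconstructed; upstream defines it at this point in the file):
 */
static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};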
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking needs to
 * be held by the read paths below.
 */
/* ... buffered readpage/readahead handlers elided ... */
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a ro filesystem */
	/* ... empty-count fast path and DAX read via iomap elided ... */
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = (1 << inode->i_blkbits) - 1;

		/* direct I/O must be logical-block aligned in pos and count */
		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		/* ... issue the read through iomap_dio_rw() ... */
	}
	return filemap_read(iocb, to, 0);
}
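/*
 * Worked example (illustrative, added): with 512-byte logical blocks,
 * blksize_mask is 0x1ff. A 4096-byte DIO read at ki_pos 4096 from a
 * 512-byte-aligned buffer passes the check; ki_pos == 100 yields -EINVAL.
 */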
/* ... DAX page-fault handlers and erofs_dax_vm_ops elided ... */

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	/* writable shared DAX mappings are not supported on a ro fs */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
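/*
 * The file_operations table these handlers plug into (reconstructed;
 * upstream data.c defines erofs_file_fops with generic llseek/splice):
 */
const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= generic_file_splice_read,
};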