Lines Matching refs:async_chunk
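
Hedged sketches of the structures and call flow these matches imply follow after the listing.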

364 struct async_chunk {  struct
378 struct async_chunk chunks[]; argument
381 static noinline int add_async_extent(struct async_chunk *cow, in add_async_extent()
469 static noinline int compress_file_range(struct async_chunk *async_chunk) in compress_file_range() argument
471 struct inode *inode = async_chunk->inode; in compress_file_range()
474 u64 start = async_chunk->start; in compress_file_range()
475 u64 end = async_chunk->end; in compress_file_range()
675 add_async_extent(async_chunk, start, total_in, in compress_file_range()
715 if (page_offset(async_chunk->locked_page) >= start && in compress_file_range()
716 page_offset(async_chunk->locked_page) <= end) in compress_file_range()
717 __set_page_dirty_nobuffers(async_chunk->locked_page); in compress_file_range()
722 add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, in compress_file_range()
751 static noinline void submit_compressed_extents(struct async_chunk *async_chunk) in submit_compressed_extents() argument
753 struct inode *inode = async_chunk->inode; in submit_compressed_extents()
764 while (!list_empty(&async_chunk->extents)) { in submit_compressed_extents()
765 async_extent = list_entry(async_chunk->extents.next, in submit_compressed_extents()
778 ret = cow_file_range(inode, async_chunk->locked_page, in submit_compressed_extents()
799 unlock_page(async_chunk->locked_page); in submit_compressed_extents()
881 async_chunk->write_flags)) { in submit_compressed_extents()
1152 struct async_chunk *async_chunk; in async_cow_start() local
1155 async_chunk = container_of(work, struct async_chunk, work); in async_cow_start()
1157 compressed_extents = compress_file_range(async_chunk); in async_cow_start()
1159 btrfs_add_delayed_iput(async_chunk->inode); in async_cow_start()
1160 async_chunk->inode = NULL; in async_cow_start()
1169 struct async_chunk *async_chunk = container_of(work, struct async_chunk, in async_cow_submit() local
1174 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> in async_cow_submit()
1188 if (async_chunk->inode) in async_cow_submit()
1189 submit_compressed_extents(async_chunk); in async_cow_submit()
1194 struct async_chunk *async_chunk; in async_cow_free() local
1196 async_chunk = container_of(work, struct async_chunk, work); in async_cow_free()
1197 if (async_chunk->inode) in async_cow_free()
1198 btrfs_add_delayed_iput(async_chunk->inode); in async_cow_free()
1203 if (atomic_dec_and_test(async_chunk->pending)) in async_cow_free()
1204 kvfree(async_chunk->pending); in async_cow_free()
1214 struct async_chunk *async_chunk; in cow_file_range_async() local
1249 async_chunk = ctx->chunks; in cow_file_range_async()
1263 async_chunk[i].pending = &ctx->num_chunks; in cow_file_range_async()
1264 async_chunk[i].inode = inode; in cow_file_range_async()
1265 async_chunk[i].start = start; in cow_file_range_async()
1266 async_chunk[i].end = cur_end; in cow_file_range_async()
1267 async_chunk[i].locked_page = locked_page; in cow_file_range_async()
1268 async_chunk[i].write_flags = write_flags; in cow_file_range_async()
1269 INIT_LIST_HEAD(&async_chunk[i].extents); in cow_file_range_async()
1271 btrfs_init_work(&async_chunk[i].work, in cow_file_range_async()
1279 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); in cow_file_range_async()
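
All of the matches above sit in the btrfs asynchronous compression path in fs/btrfs/inode.c; the source line numbers belong to one specific kernel release and will drift between versions. Below is a minimal sketch of the two structures the references imply, reconstructed only from the field accesses visible in the listing (inode, locked_page, start, end, write_flags, extents, work, pending, plus ctx->chunks and ctx->num_chunks). The stand-in typedefs exist only so the sketch compiles outside the kernel tree; the real definitions at lines 364 and 378 may order and type their members differently.

/* Stand-ins so the sketch compiles outside the kernel tree. */
typedef unsigned long long u64;
struct inode;
struct page;
struct list_head { struct list_head *next, *prev; };
struct btrfs_work { int placeholder; };
typedef struct { int counter; } atomic_t;

/* One unit of work handed to the delalloc workqueue (line 364). */
struct async_chunk {
        struct inode *inode;        /* inode being written back */
        struct page *locked_page;   /* page the caller left locked */
        u64 start;                  /* first byte of this chunk */
        u64 end;                    /* last byte of this chunk */
        unsigned int write_flags;   /* forwarded to bio submission */
        struct list_head extents;   /* async_extents queued by compress_file_range() */
        struct btrfs_work work;     /* queued on fs_info->delalloc_workers (line 1279) */
        atomic_t *pending;          /* points at the parent's num_chunks (line 1263) */
};

/* Parent context allocated by cow_file_range_async(); the flexible
 * chunks[] array is the member matched at line 378. */
struct async_cow {
        atomic_t num_chunks;
        struct async_chunk chunks[];
};

The parent context is named async_cow here to match the kernel's naming for the structure that owns chunks[]; pending pointing back at num_chunks is what lets the last chunk to finish free the whole allocation, which is why the counter sits at offset zero in this sketch (compare kvfree(async_chunk->pending) at line 1204).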
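
compress_file_range() (lines 469-722) compresses one chunk's byte range and queues each resulting extent on async_chunk->extents through add_async_extent(); when compression is skipped it re-dirties the locked page (lines 715-717) and queues the range as a single uncompressed extent (line 722). submit_compressed_extents() (lines 751-881) later drains that list with list_entry() until it is empty (lines 764-765). The user-space sketch below reproduces only that queue-then-drain list pattern; the list helpers, the async_extent fields, and the names queue_extent() and drain_extents() are simplified stand-ins, not the kernel API.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal kernel-style list, just enough for the queue/drain pattern. */
struct list_head { struct list_head *next, *prev; };

static void init_list(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for async_extent and one chunk's extents list. */
struct async_extent {
        unsigned long long start, len;
        struct list_head list;
};

struct chunk {
        struct list_head extents;
};

/* Roughly what add_async_extent() does: append one extent descriptor. */
static void queue_extent(struct chunk *c, unsigned long long start,
                         unsigned long long len)
{
        struct async_extent *e = malloc(sizeof(*e));

        e->start = start;
        e->len = len;
        list_add_tail(&e->list, &c->extents);
}

/* Same loop shape as submit_compressed_extents() (lines 764-765):
 * pop entries off the extents list until it is empty. */
static void drain_extents(struct chunk *c)
{
        while (c->extents.next != &c->extents) {
                struct async_extent *e =
                        list_entry(c->extents.next, struct async_extent, list);

                printf("extent at %llu, %llu bytes\n", e->start, e->len);
                list_del(&e->list);
                free(e);
        }
}

int main(void)
{
        struct chunk c;

        init_list(&c.extents);
        queue_extent(&c, 0, 128 * 1024);
        queue_extent(&c, 128 * 1024, 128 * 1024);
        drain_extents(&c);
        return 0;
}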
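
async_cow_start(), async_cow_submit() and async_cow_free() (lines 1152-1204) are the per-chunk work callbacks; each recovers its async_chunk from the embedded work member with container_of(), and the last chunk to be torn down frees the shared parent through the pending counter (atomic_dec_and_test() plus kvfree() at lines 1203-1204). The runnable sketch below reproduces just that ownership pattern with C11 atomics; work_item, chunk, ctx and chunk_free are stand-in names, and the real code runs these callbacks from a btrfs workqueue rather than calling them inline.

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

struct work_item { void (*fn)(struct work_item *); };

struct chunk {
        atomic_int *pending;       /* points at the parent's num_chunks */
        struct work_item work;     /* embedded, like async_chunk.work */
};

struct ctx {
        atomic_int num_chunks;     /* first member: &num_chunks is the allocation */
        struct chunk chunks[];
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Shape of async_cow_free(): recover the chunk from its work item, drop one
 * reference on the shared counter, free the parent when it reaches zero. */
static void chunk_free(struct work_item *work)
{
        struct chunk *c = container_of(work, struct chunk, work);

        if (atomic_fetch_sub(c->pending, 1) == 1)
                free(c->pending);  /* same address as the struct ctx allocation */
}

int main(void)
{
        int nr = 3;
        struct ctx *ctx = malloc(sizeof(*ctx) + nr * sizeof(ctx->chunks[0]));

        atomic_init(&ctx->num_chunks, nr);
        for (int i = 0; i < nr; i++) {
                ctx->chunks[i].pending = &ctx->num_chunks;
                ctx->chunks[i].work.fn = chunk_free;
        }

        /* Run the callbacks inline, as the workqueue eventually would. */
        for (int i = 0; i < nr; i++)
                ctx->chunks[i].work.fn(&ctx->chunks[i].work);
        return 0;
}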
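
cow_file_range_async() (lines 1214-1279) splits the dirty byte range into fixed-size pieces and fills in one async_chunk per piece (lines 1263-1269) before queueing each one on the delalloc workqueue (line 1279); async_cow_submit() later turns a chunk back into a page count (line 1174). The arithmetic below paraphrases that per-chunk setup; CHUNK_SIZE and PAGE_SHIFT are illustrative values chosen here, not quoted from the kernel.

#include <stdio.h>

#define PAGE_SHIFT 12                     /* 4 KiB pages, as on x86-64 */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define CHUNK_SIZE (512UL * 1024)         /* stand-in for the kernel's chunking constant */

int main(void)
{
        unsigned long long start = 0;
        unsigned long long end = (1536UL * 1024) - 1;   /* inclusive, like the kernel range */

        /* Same shape as the loop in cow_file_range_async(): walk the range
         * in CHUNK_SIZE pieces, each chunk getting an inclusive [start, cur_end]. */
        for (int i = 0; start <= end; i++) {
                unsigned long long cur_end = start + CHUNK_SIZE - 1;

                if (cur_end > end)
                        cur_end = end;

                /* Page count the way async_cow_submit() computes it at line 1174:
                 * the range is inclusive, so adding PAGE_SIZE before the shift
                 * accounts for the final partial page. */
                unsigned long long nr_pages =
                        (cur_end - start + PAGE_SIZE) >> PAGE_SHIFT;

                printf("chunk %d: [%llu, %llu] -> %llu pages\n",
                       i, start, cur_end, nr_pages);

                start = cur_end + 1;
        }
        return 0;
}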