Lines Matching refs:async_chunk
450 struct async_chunk { struct
465 struct async_chunk chunks[]; argument
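For orientation, the fields touched by the hits below imply a layout roughly like the following. This is a sketch reconstructed from the references in this listing (members not referenced here are omitted), not a verbatim copy of the definitions at lines 450 and 465:

	/* Reconstructed sketch, not verbatim source. */
	struct async_chunk {
		struct inode *inode;			/* inode being written back */
		struct page *locked_page;		/* page the caller holds locked, may be NULL */
		u64 start;				/* byte range covered by this chunk */
		u64 end;
		unsigned int write_flags;
		struct list_head extents;		/* async_extent entries built by compression */
		struct cgroup_subsys_state *blkcg_css;	/* writeback cgroup, may be NULL */
		struct btrfs_work work;			/* queued on the delalloc workqueue */
		atomic_t *pending;			/* points at async_cow.num_chunks */
	};

	struct async_cow {
		atomic_t num_chunks;			/* chunks still in flight */
		struct async_chunk chunks[];		/* one entry per sub-range */
	};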
468 static noinline int add_async_extent(struct async_chunk *cow, in add_async_extent()
560 static noinline int compress_file_range(struct async_chunk *async_chunk) in compress_file_range() argument
562 struct inode *inode = async_chunk->inode; in compress_file_range()
565 u64 start = async_chunk->start; in compress_file_range()
566 u64 end = async_chunk->end; in compress_file_range()
772 add_async_extent(async_chunk, start, total_in, in compress_file_range()
812 if (async_chunk->locked_page && in compress_file_range()
813 (page_offset(async_chunk->locked_page) >= start && in compress_file_range()
814 page_offset(async_chunk->locked_page) <= end)) { in compress_file_range()
815 __set_page_dirty_nobuffers(async_chunk->locked_page); in compress_file_range()
821 add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, in compress_file_range()
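The two add_async_extent() calls above record either a compressed result (line 772) or, on the fallback path, the raw range with BTRFS_COMPRESS_NONE (line 821) on async_chunk->extents. A minimal sketch of such a helper, assuming an async_extent record shaped as below (field names are inferred, not quoted from this listing):

	struct async_extent {
		u64 start;
		u64 ram_size;		/* uncompressed length */
		u64 compressed_size;	/* 0 when stored uncompressed */
		struct page **pages;	/* compressed pages, NULL on the fallback path */
		unsigned long nr_pages;
		int compress_type;	/* e.g. BTRFS_COMPRESS_NONE */
		struct list_head list;
	};

	/* Sketch: append one sub-range to the chunk's extents list. */
	static int add_async_extent(struct async_chunk *cow, u64 start, u64 ram_size,
				    u64 compressed_size, struct page **pages,
				    unsigned long nr_pages, int compress_type)
	{
		struct async_extent *async_extent;

		async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
		if (!async_extent)
			return -ENOMEM;
		async_extent->start = start;
		async_extent->ram_size = ram_size;
		async_extent->compressed_size = compressed_size;
		async_extent->pages = pages;
		async_extent->nr_pages = nr_pages;
		async_extent->compress_type = compress_type;
		list_add_tail(&async_extent->list, &cow->extents);
		return 0;
	}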
850 static noinline void submit_compressed_extents(struct async_chunk *async_chunk) in submit_compressed_extents() argument
852 struct btrfs_inode *inode = BTRFS_I(async_chunk->inode); in submit_compressed_extents()
863 while (!list_empty(&async_chunk->extents)) { in submit_compressed_extents()
864 async_extent = list_entry(async_chunk->extents.next, in submit_compressed_extents()
877 ret = cow_file_range(inode, async_chunk->locked_page, in submit_compressed_extents()
897 else if (ret && async_chunk->locked_page) in submit_compressed_extents()
898 unlock_page(async_chunk->locked_page); in submit_compressed_extents()
976 async_chunk->write_flags, in submit_compressed_extents()
977 async_chunk->blkcg_css)) { in submit_compressed_extents()
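Lines 863-864 are the usual list-drain pattern: peel the first async_extent off async_chunk->extents, detach it, then either fall back to cow_file_range() with the chunk's locked_page (lines 877-898) or submit the compressed pages using the chunk's write_flags and blkcg_css (lines 976-977). A hedged sketch of the drain loop, with allocation and I/O details elided:

	while (!list_empty(&async_chunk->extents)) {
		struct async_extent *async_extent;

		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		/* reserve space and submit the compressed bio, or fall back
		 * to cow_file_range() when no compressed pages were produced */

		kfree(async_extent);
	}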
1286 struct async_chunk *async_chunk; in async_cow_start() local
1289 async_chunk = container_of(work, struct async_chunk, work); in async_cow_start()
1291 compressed_extents = compress_file_range(async_chunk); in async_cow_start()
1293 btrfs_add_delayed_iput(async_chunk->inode); in async_cow_start()
1294 async_chunk->inode = NULL; in async_cow_start()
1303 struct async_chunk *async_chunk = container_of(work, struct async_chunk, in async_cow_submit() local
1308 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> in async_cow_submit()
1317 if (async_chunk->inode) in async_cow_submit()
1318 submit_compressed_extents(async_chunk); in async_cow_submit()
1328 struct async_chunk *async_chunk; in async_cow_free() local
1330 async_chunk = container_of(work, struct async_chunk, work); in async_cow_free()
1331 if (async_chunk->inode) in async_cow_free()
1332 btrfs_add_delayed_iput(async_chunk->inode); in async_cow_free()
1333 if (async_chunk->blkcg_css) in async_cow_free()
1334 css_put(async_chunk->blkcg_css); in async_cow_free()
1339 if (atomic_dec_and_test(async_chunk->pending)) in async_cow_free()
1340 kvfree(async_chunk->pending); in async_cow_free()
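Lines 1339-1340 work because pending is wired to &ctx->num_chunks (line 1401), and num_chunks sits at the start of the kvmalloc'ed struct async_cow; the worker that drops the count to zero therefore frees the whole chunk array with a single kvfree(). A sketch of the pairing, assuming that layout:

	/* setup side, cow_file_range_async(): */
	atomic_set(&ctx->num_chunks, num_chunks);
	async_chunk[i].pending = &ctx->num_chunks;

	/* teardown side, async_cow_free(), run once per chunk: */
	if (atomic_dec_and_test(async_chunk->pending))
		kvfree(async_chunk->pending);	/* releases ctx and every chunk at once */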
1352 struct async_chunk *async_chunk; in cow_file_range_async() local
1387 async_chunk = ctx->chunks; in cow_file_range_async()
1401 async_chunk[i].pending = &ctx->num_chunks; in cow_file_range_async()
1402 async_chunk[i].inode = &inode->vfs_inode; in cow_file_range_async()
1403 async_chunk[i].start = start; in cow_file_range_async()
1404 async_chunk[i].end = cur_end; in cow_file_range_async()
1405 async_chunk[i].write_flags = write_flags; in cow_file_range_async()
1406 INIT_LIST_HEAD(&async_chunk[i].extents); in cow_file_range_async()
1429 async_chunk[i].locked_page = locked_page; in cow_file_range_async()
1432 async_chunk[i].locked_page = NULL; in cow_file_range_async()
1437 async_chunk[i].blkcg_css = blkcg_css; in cow_file_range_async()
1439 async_chunk[i].blkcg_css = NULL; in cow_file_range_async()
1442 btrfs_init_work(&async_chunk[i].work, async_cow_start, in cow_file_range_async()
1448 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); in cow_file_range_async()
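Taken together, the setup hits above show cow_file_range_async() carving [start, end] into fixed-size pieces (512K per chunk when compression is in play, in this era of the code), filling in one async_chunk per piece and queueing it on the delalloc workqueue with async_cow_start, async_cow_submit and async_cow_free as the three work phases. A condensed sketch of that loop, with error handling and the locked_page/blkcg_css special cases (lines 1429-1439) left out; treat the constants and allocation details as assumptions:

	unsigned int i;
	const unsigned int num_chunks = DIV_ROUND_UP(end - start + 1, SZ_512K);
	struct async_cow *ctx = kvmalloc(struct_size(ctx, chunks, num_chunks),
					 GFP_KERNEL);
	struct async_chunk *async_chunk = ctx->chunks;

	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		async_chunk[i].pending = &ctx->num_chunks;
		async_chunk[i].inode = &inode->vfs_inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);
		/* locked_page and blkcg_css assignment omitted, see lines 1429-1439 */

		btrfs_init_work(&async_chunk[i].work, async_cow_start,
				async_cow_submit, async_cow_free);
		btrfs_queue_work(fs_info->delalloc_workers,
				 &async_chunk[i].work);

		start = cur_end + 1;
	}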