References to `em` in fs/btrfs/extent_map.c, grouped by function; elided context is marked with /* ... */ comments.

struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;

	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	refcount_set(&em->refs, 1);	/* the caller owns the initial reference */
	INIT_LIST_HEAD(&em->list);
	return em;
}
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(refcount_read(&em->refs) == 0);
	if (refcount_dec_and_test(&em->refs)) {
		/* Last reference is gone; the map must not be in a tree. */
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}
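Taken together, alloc_extent_map() and free_extent_map() give the extent map a plain refcounted lifetime: every holder does one put, and the final put frees the object. A minimal caller sketch (start and len are placeholder values, error handling abbreviated, not a verbatim kernel excerpt):

	struct extent_map *em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	em->start = start;
	em->len = len;
	/* ... on any failure past this point, a single put releases it: */
	free_extent_map(em);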
static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	/* ... locals: p, parent, entry, orig_parent, leftmost ... */
	u64 end = range_end(em->start, em->len);

	/* ... descend the tree, keyed by em->start ... */
	if (em->start < entry->start) {
		/* go left */
	} else if (em->start >= extent_map_end(entry)) {
		/* go right */
	}
	/* ... refuse to insert a range that overlaps either neighbour ... */
	while (parent && em->start >= extent_map_end(entry)) { /* ... */ }
	if (end > entry->start && em->start < extent_map_end(entry))
		return -EEXIST;
	while (parent && em->start < entry->start) { /* ... */ }
	if (end > entry->start && em->start < extent_map_end(entry))
		return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}
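Both -EEXIST checks above are the standard intersection test for half-open ranges. Restated as a standalone helper (hypothetical, for illustration only; the kernel open-codes the expression):

	/*
	 * [a_start, a_end) and [b_start, b_end) intersect iff each range
	 * starts before the other one ends.
	 */
	static inline bool ranges_overlap(u64 a_start, u64 a_end,
					  u64 b_start, u64 b_end)
	{
		return a_end > b_start && a_start < b_end;
	}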
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	/* ... */
	/* Try to absorb the previous extent map in the tree. */
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		/* ... */
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			/* Extend the modified range back to merge's start,
			 * keeping its old end. */
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);
			/* ... unlink and free 'merge' ... */
		}
	}

	/* Try to absorb the next extent map in the tree. */
	rb = rb_next(&em->rb_node);
	/* ... */
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		/* ... */
		/* Extend the modified range forward to merge's end. */
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		/* ... unlink and free 'merge' ... */
	}
}
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen)
{
	struct extent_map *em;
	/* ... */
	em = lookup_extent_mapping(tree, start, len);
	WARN_ON(!em || em->start != start);
	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	/* For preallocated extents, reset the modified range after merging. */
	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);	/* drop the lookup reference */
	/* ... */
}
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}
static void setup_extent_mapping(struct extent_map_tree *tree,
				 struct extent_map *em, int modified)
{
	refcount_inc(&em->refs);	/* the tree now holds its own reference */
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}
static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	/* ... set 'bits' over each stripe's physical range on its device ... */
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	/* ... clear 'bits' over the same per-stripe ranges ... */
}
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	/* ... the caller must hold tree->lock for writing ... */
	ret = tree_insert(&tree->map, em);
	/* ... bail out on -EEXIST ... */
	setup_extent_mapping(tree, em, modified);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
		/* Chunk mappings also track per-device allocation state. */
		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
	}
	/* ... */
}
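A typical writer takes the tree lock around the insert and then drops its allocation reference, since setup_extent_mapping() gave the tree one of its own. Sketched usage, with em_tree and em as in the earlier sketch (not a verbatim kernel excerpt):

	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	write_unlock(&em_tree->lock);
	free_extent_map(em);	/* the tree keeps its own reference */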
static struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
						  u64 start, u64 len, int strict)
{
	struct extent_map *em;
	/* ... find an rbtree node at or near [start, start + len) ... */
	em = rb_entry(rb_node, struct extent_map, rb_node);
	/* In strict mode, require an actual overlap with the range. */
	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;
	refcount_inc(&em->refs);	/* the caller must free_extent_map() */
	return em;
}
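lookup_extent_mapping() (the strict wrapper around this helper) therefore returns a referenced map, so readers hold the tree lock only for the lookup itself. A sketch of the usual pattern, with placeholder start/len:

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);
	if (em) {
		/* ... use em->block_start, em->len, ... */
		free_extent_map(em);	/* drop the lookup reference */
	}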
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
	RB_CLEAR_NODE(&em->rb_node);
}
static struct extent_map *next_extent_map(struct extent_map *em)
{
	struct rb_node *next = rb_next(&em->rb_node);
	return next ? rb_entry(next, struct extent_map, rb_node) : NULL;
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev = rb_prev(&em->rb_node);
	return prev ? rb_entry(prev, struct extent_map, rb_node) : NULL;
}
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
				struct extent_map *em, u64 map_start)
{
	/* ... */
	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
	/* ... pick the neighbours 'prev' and 'next' around 'existing' ... */

	/* Clip em's range to the hole between prev and next. */
	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		/* Uncompressed regular extent: shift the disk range too. */
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	struct extent_map *em = *em_in;

	ret = add_extent_mapping(em_tree, em, 0);
	if (ret == -EEXIST) {	/* an overlapping map 'existing' is already there */
		/* ... */
		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
		/* Either 'existing' already covers 'start', so reuse it: */
		free_extent_map(em);
		/* ... or clip em to the gap beside 'existing' and re-insert: */
		u64 orig_start = em->start;
		u64 orig_len = em->len;
		ret = merge_extent_mapping(em_tree, existing, em, start);
		/* ... on failure, drop em: */
		free_extent_map(em);
	}
	/* ... */
}
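In this version of the function, the net effect is that on success *em_in points at a map covering 'start': either the caller's map, possibly clipped by merge_extent_mapping(), or the pre-existing overlapping one; on failure the caller's map has already been dropped.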