Lines matching "child" / "node" in dm-btree-remove.c (matched lines only; intervening code elided)

#include "dm-btree.h"
#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"
#include <linux/device-mapper.h>
/*
 * A very important constraint for our btree is that no node, except the
 * root, may have fewer than a certain number of entries.
 *
 * Each node may have a left or right sibling.  When descending the spine,
 * if a node contains only MIN_ENTRIES then we try and increase this to at
 * least MIN_ENTRIES + 1, in one of the following ways:
 *
 * [A] No siblings => this can only happen if the node is the root, in which
 *     case we copy the child's contents over the root.
 * [B] No left sibling   ==> rebalance(node, right sibling)
 * [C] No right sibling  ==> rebalance(left sibling, node)
 * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
 *     ==> delete node, adding its contents to left and right
 * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
 *     ==> rebalance(left, node, right)
 *
 * After these operations it's possible that our original node no longer
 * contains the desired subtree.  For this reason the rebalancing is
 * performed on the children of the current node.  This also avoids having
 * a special case for the root.
 *
 * Once this rebalancing has occurred we can then step into the child node
 * the spine mentions.
 */
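/*
 * Illustrative, standalone sketch (not the driver's code) of how the
 * [A]..[E] cases above can be chosen from a child's index within its
 * parent.  The enum and function names are assumptions made for this
 * example; in the driver the decision is made by rebalance_children(),
 * excerpted further below.
 */
enum rebalance_action {
	COLLAPSE_ROOT,		/* [A]: node is the root's only child */
	REBALANCE_WITH_RIGHT,	/* [B]: no left sibling */
	REBALANCE_WITH_LEFT,	/* [C]: no right sibling */
	REBALANCE_THREE		/* [D]/[E]: both siblings; merge or redistribute */
};

static enum rebalance_action choose_rebalance(unsigned i, unsigned nr_children)
{
	int has_left = (i > 0);
	int has_right = (i + 1 < nr_children);

	if (!has_left && !has_right)
		return COLLAPSE_ROOT;
	if (!has_left)
		return REBALANCE_WITH_RIGHT;
	if (!has_right)
		return REBALANCE_WITH_LEFT;
	return REBALANCE_THREE;
}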
/*
 * Some little utilities for moving node data around.
 */
/* node_shift() */
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
	uint32_t value_size = le32_to_cpu(n->header.value_size);
		shift = -shift;
			(nr_entries - shift) * sizeof(__le64));
			(nr_entries - shift) * value_size);
		BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
/* node_copy() */
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t value_size = le32_to_cpu(left->header.value_size);
	if (value_size != le32_to_cpu(right->header.value_size)) {
		return -EILSEQ;
		shift = -shift;
		if (nr_left + shift > le32_to_cpu(left->header.max_entries)) {
			return -EINVAL;
		if (shift > le32_to_cpu(right->header.max_entries)) {
			return -EINVAL;
			key_ptr(left, nr_left - shift),
			value_ptr(left, nr_left - shift),
/*
 * delete_at(): delete a specific entry from a leaf node.
 */
	unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
	unsigned nr_to_copy = nr_entries - (index + 1);
	uint32_t value_size = le32_to_cpu(n->header.value_size);
	n->header.nr_entries = cpu_to_le32(nr_entries - 1);
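/*
 * The memmove() calls that actually close the gap are not among the
 * matched lines above, so here is a standalone sketch of the same
 * delete-at-index technique over plain parallel arrays.  The function and
 * parameter names are illustrative, not the driver's.
 */
#include <string.h>
#include <stdint.h>

static void delete_entry(uint64_t *keys, unsigned char *values,
			 size_t value_size, unsigned *nr_entries,
			 unsigned index)
{
	unsigned nr_to_copy = *nr_entries - (index + 1);

	if (nr_to_copy) {
		/* shift the trailing keys and values down one slot */
		memmove(keys + index, keys + index + 1,
			nr_to_copy * sizeof(*keys));
		memmove(values + index * value_size,
			values + (index + 1) * value_size,
			nr_to_copy * value_size);
	}
	(*nr_entries)--;
}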
/* merge_threshold() */
	return le32_to_cpu(n->header.max_entries) / 3;
struct child {
/* init_child() */
		      unsigned index, struct child *result)
	result->index = index;
	r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
			       &result->block, &inc);
	result->n = dm_block_data(result->block);
		inc_children(info->tm, result->n, vt);
		cpu_to_le64(dm_block_location(result->block));
static void exit_child(struct dm_btree_info *info, struct child *c)
	dm_tm_unlock(info->tm, c->block);
/* shift() */
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);
		DMERR("node max_entries mismatch");
		return -EILSEQ;
	if (nr_left - count > max_entries) {
		DMERR("node shift out of bounds");
		return -EINVAL;
		DMERR("node shift out of bounds");
		return -EINVAL;
	left->header.nr_entries = cpu_to_le32(nr_left - count);
	right->header.nr_entries = cpu_to_le32(nr_right + count);
/* __rebalance2() */
			struct child *l, struct child *r)
	struct btree_node *left = l->n;
	struct btree_node *right = r->n;
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	/*
	 * Ensure the number of entries in each child will be greater
	 * than or equal to (max_entries / 3 + 1), so no matter which
	 * child is used for removal, the number will still not be
	 * less than (max_entries / 3).
	 */
		node_copy(left, right, -nr_right);
		left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
		delete_at(parent, r->index);
		dm_tm_dec(info->tm, dm_block_location(r->block));
		ret = shift(left, right, nr_left - target_left);
		*key_ptr(parent, r->index) = right->keys[0];
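/*
 * Standalone sketch of the decision __rebalance2() makes, assuming the
 * threshold is 2 * (merge_threshold + 1) as the comment above implies:
 * when the two children together hold fewer entries than that, they are
 * merged into one node (the node_copy()/delete_at() path); otherwise the
 * entries are split roughly evenly between them (the shift() path, with
 * target_left = (nr_left + nr_right) / 2).  The function name below is
 * illustrative, not the driver's.
 */
#include <stdbool.h>

static bool should_merge_siblings(unsigned nr_left, unsigned nr_right,
				  unsigned max_entries)
{
	unsigned merge_threshold = max_entries / 3;

	return nr_left + nr_right < 2 * (merge_threshold + 1);
}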
/* rebalance2() */
	struct child left, right;
/* delete_center_node() */
			      struct child *l, struct child *c, struct child *r,
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
	unsigned shift = min(max_entries - nr_left, nr_center);
		DMERR("node shift out of bounds");
		return -EINVAL;
	node_copy(left, center, -shift);
	left->header.nr_entries = cpu_to_le32(nr_left + shift);
		shift = nr_center - shift;
			DMERR("node shift out of bounds");
			return -EINVAL;
		right->header.nr_entries = cpu_to_le32(nr_right + shift);
	*key_ptr(parent, r->index) = right->keys[0];
	delete_at(parent, c->index);
	r->index--;
	dm_tm_dec(info->tm, dm_block_location(c->block));
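/*
 * Standalone sketch of how delete_center_node() divides the centre
 * child's entries: as many as fit are appended to the left sibling
 * (min(max_entries - nr_left, nr_center), as in the matched line above),
 * and whatever remains is prepended to the right sibling before the
 * centre node is removed from the parent.  Names are illustrative.
 */
static void split_center_entries(unsigned nr_left, unsigned nr_center,
				 unsigned max_entries,
				 unsigned *to_left, unsigned *to_right)
{
	unsigned room_left = max_entries - nr_left;

	*to_left = nr_center < room_left ? nr_center : room_left;
	*to_right = nr_center - *to_left;
}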
/* redistribute3() */
			 struct child *l, struct child *c, struct child *r,
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
		s = nr_left - target_left;
		if (s < 0 && nr_center < -s) {
			/* not enough in central node */
			ret = shift(left, center, -nr_center);
		ret = shift(center, right, target_right - nr_right);
		s = target_right - nr_right;
			/* not enough in central node */
			s -= nr_center;
			nr_left -= s;
		ret = shift(left, center, nr_left - target_left);
	*key_ptr(parent, c->index) = center->keys[0];
	*key_ptr(parent, r->index) = right->keys[0];
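/*
 * Standalone sketch of the per-child targets redistribute3() works
 * towards: the combined entry count is split as evenly as possible over
 * the three children.  The exact remainder handling in the driver may
 * differ between kernel versions; this only approximates the idea behind
 * the target_left/target_right values used in the matched lines above.
 */
static void three_way_targets(unsigned nr_left, unsigned nr_center,
			      unsigned nr_right,
			      unsigned *target_left, unsigned *target_right)
{
	unsigned total = nr_left + nr_center + nr_right;

	*target_right = total / 3;
	*target_left = *target_right + (total % 3 ? 1 : 0);
	/* the centre child keeps the rest: total - *target_left - *target_right */
}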
/* __rebalance3() */
			struct child *l, struct child *c, struct child *r)
	struct btree_node *left = l->n;
	struct btree_node *center = c->n;
	struct btree_node *right = r->n;
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	if ((left->header.max_entries != center->header.max_entries) ||
	    (center->header.max_entries != right->header.max_entries)) {
		return -EILSEQ;
/* rebalance3() */
	struct child left, center, right;
/* rebalance_children() */
	if (le32_to_cpu(n->header.nr_entries) == 1) {
		struct dm_block *child;
		r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
		memcpy(n, dm_block_data(child),
		       dm_bm_block_size(dm_tm_get_bm(info->tm)));
		dm_tm_dec(info->tm, dm_block_location(child));
		dm_tm_unlock(info->tm, child);
		return -ENODATA;
	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
		r = rebalance2(s, info, vt, i - 1);
		r = rebalance3(s, info, vt, i - 1);
/* do_leaf() */
	    (i >= le32_to_cpu(n->header.nr_entries)) ||
	    (le64_to_cpu(n->keys[i]) != key))
		return -ENODATA;
/* remove_raw() */
		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine op.
		 */
		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
		/*
		 * We know the key is present, or else rebalance_children
		 * would have returned -ENODATA.
		 */
/* dm_btree_remove() */
	unsigned level, last_level = info->levels - 1;
	init_le64_type(info->tm, &le64_vt);
	for (level = 0; level < info->levels; level++) {
				  &info->value_type : &le64_vt),
		BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));
		if (info->value_type.dec)
			info->value_type.dec(info->value_type.context,
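/*
 * Hedged usage sketch for dm_btree_remove() (declared in dm-btree.h): the
 * caller passes one key per btree level and receives the new root block,
 * since removal rewrites shadowed copies of the blocks it touches.  This
 * assumes a single-level tree (info->levels == 1) and omits error
 * handling; the wrapper name is invented for this example.
 */
static int remove_key_example(struct dm_btree_info *info, dm_block_t root,
			      uint64_t key, dm_block_t *new_root)
{
	uint64_t keys[1] = { key };

	return dm_btree_remove(info, root, keys, new_root);
}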
/*----------------------------------------------------------------*/
/* remove_nearest() */
		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine op.
		 */
		if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
		if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
		/*
		 * We know the key is present, or else rebalance_children
		 * would have returned -ENODATA.
		 */
/* remove_one() */
	unsigned level, last_level = info->levels - 1;
	init_le64_type(info->tm, &le64_vt);
	r = remove_nearest(&spine, info, &info->value_type,
	if (index >= le32_to_cpu(n->header.nr_entries)) {
		r = -ENODATA;
	k = le64_to_cpu(n->keys[index]);
	if (info->value_type.dec)
		info->value_type.dec(info->value_type.context,
		r = -ENODATA;
/* dm_btree_remove_leaves() */
	return r == -ENODATA ? 0 : r;