Lines matching full:rb — whole-word hits for the identifier rb in the DRM range allocator, drivers/gpu/drm/drm_mm.c. Each hit shows the line number in that file, the matched source line, and the enclosing function; declaration sites are additionally tagged "argument" or "local".
37 * an RB-tree is used instead. At least if we expect heavy fragmentation.
155 INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, in INTERVAL_TREE_DEFINE() argument
171 struct rb_node **link, *rb; in drm_mm_interval_tree_add_node() local
178 rb = &hole_node->rb; in drm_mm_interval_tree_add_node()
179 while (rb) { in drm_mm_interval_tree_add_node()
180 parent = rb_entry(rb, struct drm_mm_node, rb); in drm_mm_interval_tree_add_node()
185 rb = rb_parent(rb); in drm_mm_interval_tree_add_node()
188 rb = &hole_node->rb; in drm_mm_interval_tree_add_node()
189 link = &hole_node->rb.rb_right; in drm_mm_interval_tree_add_node()
192 rb = NULL; in drm_mm_interval_tree_add_node()
198 rb = *link; in drm_mm_interval_tree_add_node()
199 parent = rb_entry(rb, struct drm_mm_node, rb); in drm_mm_interval_tree_add_node()
203 link = &parent->rb.rb_left; in drm_mm_interval_tree_add_node()
205 link = &parent->rb.rb_right; in drm_mm_interval_tree_add_node()
210 rb_link_node(&node->rb, rb, link); in drm_mm_interval_tree_add_node()
211 rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost, in drm_mm_interval_tree_add_node()
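The hits above are the insertion path of the interval tree: __subtree_last is pushed up from the hole node and then kept up to date on the way down, before rb_link_node()/rb_insert_augmented_cached() splice the new node in. Below is a minimal, user-space sketch of that augmented-descent idea, not the kernel code: a plain unbalanced BST, hypothetical struct node / insert() names, and none of the INTERVAL_TREE_DEFINE or rebalancing machinery.

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long long start, last;   /* inclusive address interval */
	unsigned long long subtree_last;  /* largest .last in this subtree */
	struct node *left, *right;
};

static struct node *make_node(unsigned long long start, unsigned long long last)
{
	struct node *n = calloc(1, sizeof(*n));

	n->start = start;
	n->last = last;
	n->subtree_last = last;
	return n;
}

/* Insert keyed on interval start, fixing up subtree_last for every node
 * passed on the descent -- the same bookkeeping the descent loop in
 * drm_mm_interval_tree_add_node() performs before rb_link_node(). */
static struct node *insert(struct node *root, struct node *n)
{
	struct node **link = &root;

	while (*link) {
		struct node *parent = *link;

		if (parent->subtree_last < n->last)
			parent->subtree_last = n->last;
		link = n->start < parent->start ? &parent->left : &parent->right;
	}
	*link = n;
	return root;
}

int main(void)
{
	struct node *root = NULL;

	root = insert(root, make_node(0, 4095));
	root = insert(root, make_node(8192, 16383));
	root = insert(root, make_node(4096, 65535));
	printf("max end address in the tree: %llu\n", root->subtree_last);
	return 0;
}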
218 static u64 rb_to_hole_size(struct rb_node *rb) in rb_to_hole_size() argument
220 return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size; in rb_to_hole_size()
226 struct rb_node **link = &root->rb_root.rb_node, *rb = NULL; in insert_hole_size() local
231 rb = *link; in insert_hole_size()
232 if (x > rb_to_hole_size(rb)) { in insert_hole_size()
233 link = &rb->rb_left; in insert_hole_size()
235 link = &rb->rb_right; in insert_hole_size()
240 rb_link_node(&node->rb_hole_size, rb, link); in insert_hole_size()
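insert_hole_size() keeps free holes in a tree ordered by size with the largest hole leftmost, and rb_insert_color_cached() records that leftmost node. A minimal sketch of the same ordering and cached-leftmost bookkeeping follows, assuming a simplified hypothetical struct hole / struct hole_tree and doing no rebalancing.

#include <stdbool.h>
#include <stddef.h>

struct hole {
	unsigned long long size;
	struct hole *left, *right;
};

struct hole_tree {
	struct hole *root;
	struct hole *largest;	/* cached leftmost node, i.e. biggest hole */
};

/* Order holes by size, biggest to the left, so the leftmost node is
 * always the largest free hole -- the invariant insert_hole_size()
 * maintains via the "first" flag passed to rb_insert_color_cached(). */
void insert_hole(struct hole_tree *tree, struct hole *node)
{
	struct hole **link = &tree->root;
	bool first = true;

	node->left = node->right = NULL;
	while (*link) {
		if (node->size > (*link)->size) {
			link = &(*link)->left;	/* bigger holes go left */
		} else {
			link = &(*link)->right;
			first = false;		/* no longer leftmost */
		}
	}
	*link = node;
	if (first)
		tree->largest = node;
}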
298 static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb) in rb_hole_size_to_node() argument
300 return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size); in rb_hole_size_to_node()
303 static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb) in rb_hole_addr_to_node() argument
305 return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr); in rb_hole_addr_to_node()
310 struct rb_node *rb = mm->holes_size.rb_root.rb_node; in best_hole() local
315 rb_entry(rb, struct drm_mm_node, rb_hole_size); in best_hole()
319 rb = rb->rb_right; in best_hole()
321 rb = rb->rb_left; in best_hole()
323 } while (rb); in best_hole()
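best_hole() walks that size-ordered tree for a best fit: whenever the current hole is big enough it is remembered and the search continues to the right, towards smaller holes. A minimal sketch of the same descent over the simplified tree from the previous sketch (best_fit() and struct hole are hypothetical names, not drm_mm API):

#include <stddef.h>

struct hole {
	unsigned long long size;
	struct hole *left, *right;
};

/* Best fit over a tree ordered largest-to-the-left: the answer is the
 * smallest hole still >= size, found by the same descent best_hole() uses. */
struct hole *best_fit(struct hole *root, unsigned long long size)
{
	struct hole *h = root, *best = NULL;

	while (h) {
		if (size <= h->size) {
			best = h;	/* fits -- remember it */
			h = h->right;	/* and try to find a tighter fit */
		} else {
			h = h->left;	/* too small -- larger holes are left */
		}
	}
	return best;
}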
328 static bool usable_hole_addr(struct rb_node *rb, u64 size) in usable_hole_addr() argument
330 return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size; in usable_hole_addr()
335 struct rb_node *rb = mm->holes_addr.rb_node; in find_hole_addr() local
338 while (rb) { in find_hole_addr()
341 if (!usable_hole_addr(rb, size)) in find_hole_addr()
344 node = rb_hole_addr_to_node(rb); in find_hole_addr()
348 rb = node->rb_hole_addr.rb_left; in find_hole_addr()
350 rb = node->rb_hole_addr.rb_right; in find_hole_addr()
384 * @first: first rb member to traverse (either rb_left or rb_right).
385 * @last: last rb member to traverse (either rb_right or rb_left).
387 * This macro declares a function to return the next hole of the addr rb tree.
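find_hole_addr() and the helpers generated by DECLARE_NEXT_HOLE_ADDR() both rely on subtree_max_hole: every node of the address-ordered tree caches the largest hole in its subtree, so usable_hole_addr() lets the walk skip any branch that cannot possibly satisfy the request. Below is a minimal user-space sketch of that pruned lookup, assuming a simplified hypothetical struct hole and no balancing.

#include <stdbool.h>
#include <stddef.h>

struct hole {
	unsigned long long start;             /* hole start address */
	unsigned long long size;              /* hole length */
	unsigned long long subtree_max_hole;  /* max size in this subtree */
	struct hole *left, *right;
};

/* Prune: nothing below h can hold "size" bytes if its cached max is
 * too small -- the check usable_hole_addr() provides in the kernel. */
static bool usable(const struct hole *h, unsigned long long size)
{
	return h && h->subtree_max_hole >= size;
}

/* Descend towards the hole containing addr, abandoning the walk as soon
 * as the pruning test fails; the last usable node visited is returned
 * and the caller is expected to re-check it, as drm_mm does. */
struct hole *find_hole(struct hole *root, unsigned long long addr,
		       unsigned long long size)
{
	struct hole *h = root, *found = NULL;

	while (h) {
		if (!usable(h, size))
			break;

		found = h;
		if (addr < h->start)
			h = h->left;
		else if (addr > h->start + h->size)
			h = h->right;
		else
			break;
	}
	return found;
}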
496 static u64 rb_to_hole_size_or_zero(struct rb_node *rb) in rb_to_hole_size_or_zero() argument
498 return rb ? rb_to_hole_size(rb) : 0; in rb_to_hole_size_or_zero()
673 rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree); in drm_mm_replace_node()
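drm_mm_replace_node() swaps a new node into the interval tree with rb_replace_node_cached() rather than removing and re-inserting, which is safe because the replacement occupies exactly the old node's range. A rough sketch of the same idea on a plain parent-linked BST follows; replace_node() and struct node are hypothetical, and the real helper additionally takes over the victim's colour and the tree's cached leftmost pointer.

#include <stddef.h>

struct node {
	unsigned long long start, size;
	struct node *parent, *left, *right;
};

/* The replacement covers the same range as the victim, so it can adopt
 * the victim's links in place instead of being erased and re-inserted. */
void replace_node(struct node **root, struct node *old, struct node *new)
{
	*new = *old;			/* copy payload and tree links */

	if (new->left)
		new->left->parent = new;
	if (new->right)
		new->right->parent = new;

	if (!old->parent)
		*root = new;		/* victim was the root */
	else if (old->parent->left == old)
		old->parent->left = new;
	else
		old->parent->right = new;
}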