Lines matching refs:wr_mas
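
All of the hits below are in the Linux kernel's lib/maple_tree.c; wr_mas is the conventional name for a maple-tree write state. For orientation, the structure can be pieced together from the fields these lines touch (mas, node, r_min/r_max, type, offset_end, node_end, pivots, end_piv, slots, entry, content). A sketch follows; the field order and exact declarations are assumptions from memory of include/linux/maple_tree.h, not quoted from this tree:

	/* Assumed sketch of struct ma_wr_state (include/linux/maple_tree.h). */
	struct ma_wr_state {
		struct ma_state *mas;		/* iterator the write goes through */
		struct maple_node *node;	/* decoded node behind mas->node */
		unsigned long r_min;		/* start of the range at mas->offset */
		unsigned long r_max;		/* end of the range at mas->offset */
		enum maple_type type;		/* node type of @node */
		unsigned char offset_end;	/* last slot the write touches */
		unsigned char node_end;		/* last occupied slot in the node */
		unsigned long *pivots;		/* @node's pivot array */
		unsigned long end_piv;		/* pivot at offset_end (or mas->max) */
		void __rcu **slots;		/* @node's slot array */
		void *entry;			/* the entry being stored */
		void *content;			/* what was at mas->index before */
	};
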
2101 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas, in mas_store_b_node() argument
2108 struct ma_state *mas = wr_mas->mas; in mas_store_b_node()
2110 b_node->type = wr_mas->type; in mas_store_b_node()
2123 b_node->slot[b_end] = wr_mas->content; in mas_store_b_node()
2124 if (!wr_mas->content) in mas_store_b_node()
2131 b_node->slot[b_end] = wr_mas->entry; in mas_store_b_node()
2139 piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type); in mas_store_b_node()
2142 mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type); in mas_store_b_node()
2145 wr_mas->content = mas_slot_locked(mas, wr_mas->slots, in mas_store_b_node()
2148 b_node->slot[++b_end] = wr_mas->content; in mas_store_b_node()
2149 if (!wr_mas->content) in mas_store_b_node()
2155 if (slot > wr_mas->node_end) in mas_store_b_node()
2159 mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end); in mas_store_b_node()
2235 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) in mas_wr_node_walk() argument
2237 struct ma_state *mas = wr_mas->mas; in mas_wr_node_walk()
2240 if (unlikely(ma_is_dense(wr_mas->type))) { in mas_wr_node_walk()
2241 wr_mas->r_max = wr_mas->r_min = mas->index; in mas_wr_node_walk()
2246 wr_mas->node = mas_mn(wr_mas->mas); in mas_wr_node_walk()
2247 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type); in mas_wr_node_walk()
2248 count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type, in mas_wr_node_walk()
2249 wr_mas->pivots, mas->max); in mas_wr_node_walk()
2252 while (offset < count && mas->index > wr_mas->pivots[offset]) in mas_wr_node_walk()
2255 wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max; in mas_wr_node_walk()
2256 wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset); in mas_wr_node_walk()
2257 wr_mas->offset_end = mas->offset = offset; in mas_wr_node_walk()
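
Lines 2240-2257 are the in-node walk: dense nodes resolve to mas->index directly, everything else does a linear scan of the pivot array (count is the node_end computed at line 2248). Joined up, with the non-matching lines in between filled by assumption, the scan reads:

	offset = mas->offset;		/* (assumed) resume from the cached offset */
	while (offset < count && mas->index > wr_mas->pivots[offset])
		offset++;

	/* The located slot's range: r_max is its pivot (mas->max past the last
	 * pivot); r_min is one past the previous pivot, via mas_safe_min(). */
	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
	wr_mas->offset_end = mas->offset = offset;
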
2350 MA_WR_STATE(wr_mas, mast->orig_r, NULL); in mast_ascend()
2360 wr_mas.type = mte_node_type(mast->orig_r->node); in mast_ascend()
2361 mas_wr_node_walk(&wr_mas); in mast_ascend()
2365 wr_mas.mas = mast->orig_l; in mast_ascend()
2366 wr_mas.type = mte_node_type(mast->orig_l->node); in mast_ascend()
2367 mas_wr_node_walk(&wr_mas); in mast_ascend()
2369 mast->bn->type = wr_mas.type; in mast_ascend()
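
MA_WR_STATE at line 2350 (and again at 3832, 4283, 5395 and elsewhere below) declares an on-stack write state. As I recall it from include/linux/maple_tree.h, the macro seeds only three fields and leaves the rest to the walk, which is why mast_ascend() can simply re-point wr_mas.mas at line 2365 and walk again:

	#define MA_WR_STATE(name, ma_state, wr_entry)			\
		struct ma_wr_state name = {				\
			.mas = ma_state,				\
			.content = NULL,				\
			.entry = wr_entry,				\
		}
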
3444 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas, in mas_reuse_node() argument
3448 if (mt_in_rcu(wr_mas->mas->tree)) in mas_reuse_node()
3452 int clear = mt_slots[wr_mas->type] - bn->b_end; in mas_reuse_node()
3454 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--); in mas_reuse_node()
3455 memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear); in mas_reuse_node()
3457 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false); in mas_reuse_node()
3467 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas, in mas_commit_b_node() argument
3475 old_enode = wr_mas->mas->node; in mas_commit_b_node()
3478 (mas_mt_height(wr_mas->mas) > 1)) in mas_commit_b_node()
3479 return mas_rebalance(wr_mas->mas, b_node); in mas_commit_b_node()
3482 return mas_split(wr_mas->mas, b_node); in mas_commit_b_node()
3484 if (mas_reuse_node(wr_mas, b_node, end)) in mas_commit_b_node()
3487 mas_node_count(wr_mas->mas, 1); in mas_commit_b_node()
3488 if (mas_is_err(wr_mas->mas)) in mas_commit_b_node()
3491 node = mas_pop_node(wr_mas->mas); in mas_commit_b_node()
3492 node->parent = mas_mn(wr_mas->mas)->parent; in mas_commit_b_node()
3493 wr_mas->mas->node = mt_mk_node(node, b_type); in mas_commit_b_node()
3494 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false); in mas_commit_b_node()
3495 mas_replace_node(wr_mas->mas, old_enode); in mas_commit_b_node()
3497 mas_update_gap(wr_mas->mas); in mas_commit_b_node()
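
Lines 3475-3497 are the commit ladder for a rebuilt big node: underfull and not the sole node means rebalance, overfull means split, and otherwise the contents are copied back, reusing the old node when no RCU reader can observe it (mas_reuse_node, lines 3448-3457). Reassembled, with the locals and the elided guard conditions marked as assumptions (end is the function's third argument, per the call at line 4198):

	struct maple_node *node;
	struct maple_enode *old_enode;		/* (assumed locals) */
	unsigned char b_end = b_node->b_end;
	enum maple_type b_type = b_node->type;

	old_enode = wr_mas->mas->node;
	if (b_end < mt_min_slots[b_type] &&	/* (assumed guard) */
	    (mas_mt_height(wr_mas->mas) > 1))
		return mas_rebalance(wr_mas->mas, b_node);

	if (b_end >= mt_slots[b_type])		/* (assumed guard) */
		return mas_split(wr_mas->mas, b_node);

	if (mas_reuse_node(wr_mas, b_node, end))
		goto done;

	/* Fresh node: copy the big node in, then publish it atomically so
	 * RCU readers only ever see the old or the new complete node. */
	mas_node_count(wr_mas->mas, 1);
	if (mas_is_err(wr_mas->mas))
		return 0;
	node = mas_pop_node(wr_mas->mas);
	node->parent = mas_mn(wr_mas->mas)->parent;
	wr_mas->mas->node = mt_mk_node(node, b_type);
	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
	mas_replace_node(wr_mas->mas, old_enode);
done:
	mas_update_gap(wr_mas->mas);
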
3573 static bool mas_is_span_wr(struct ma_wr_state *wr_mas) in mas_is_span_wr() argument
3575 unsigned long max = wr_mas->r_max; in mas_is_span_wr()
3576 unsigned long last = wr_mas->mas->last; in mas_is_span_wr()
3577 enum maple_type type = wr_mas->type; in mas_is_span_wr()
3578 void *entry = wr_mas->entry; in mas_is_span_wr()
3585 max = wr_mas->mas->max; in mas_is_span_wr()
3599 trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry); in mas_is_span_wr()
3603 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas) in mas_wr_walk_descend() argument
3605 wr_mas->type = mte_node_type(wr_mas->mas->node); in mas_wr_walk_descend()
3606 mas_wr_node_walk(wr_mas); in mas_wr_walk_descend()
3607 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type); in mas_wr_walk_descend()
3610 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas) in mas_wr_walk_traverse() argument
3612 wr_mas->mas->max = wr_mas->r_max; in mas_wr_walk_traverse()
3613 wr_mas->mas->min = wr_mas->r_min; in mas_wr_walk_traverse()
3614 wr_mas->mas->node = wr_mas->content; in mas_wr_walk_traverse()
3615 wr_mas->mas->offset = 0; in mas_wr_walk_traverse()
3616 wr_mas->mas->depth++; in mas_wr_walk_traverse()
3626 static bool mas_wr_walk(struct ma_wr_state *wr_mas) in mas_wr_walk() argument
3628 struct ma_state *mas = wr_mas->mas; in mas_wr_walk()
3631 mas_wr_walk_descend(wr_mas); in mas_wr_walk()
3632 if (unlikely(mas_is_span_wr(wr_mas))) in mas_wr_walk()
3635 wr_mas->content = mas_slot_locked(mas, wr_mas->slots, in mas_wr_walk()
3637 if (ma_is_leaf(wr_mas->type)) in mas_wr_walk()
3640 mas_wr_walk_traverse(wr_mas); in mas_wr_walk()
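
Lines 3628-3640 contain nearly the whole of mas_wr_walk(). Joined up, the descent loop looks like this; it returns false exactly when the write spans more than one node at some level, which is what routes mas_wr_store_entry() (line 4256 below) to the spanning-store path:

	while (true) {
		mas_wr_walk_descend(wr_mas);	/* set type/slots, locate the range */
		if (unlikely(mas_is_span_wr(wr_mas)))
			return false;		/* write crosses a node boundary */

		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;		/* at the leaf that will be modified */

		mas_wr_walk_traverse(wr_mas);	/* step mas into the child */
	}
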
3646 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas) in mas_wr_walk_index() argument
3648 struct ma_state *mas = wr_mas->mas; in mas_wr_walk_index()
3651 mas_wr_walk_descend(wr_mas); in mas_wr_walk_index()
3652 wr_mas->content = mas_slot_locked(mas, wr_mas->slots, in mas_wr_walk_index()
3654 if (ma_is_leaf(wr_mas->type)) in mas_wr_walk_index()
3656 mas_wr_walk_traverse(wr_mas); in mas_wr_walk_index()
3822 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) in mas_wr_spanning_store() argument
3832 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); in mas_wr_spanning_store()
3833 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); in mas_wr_spanning_store()
3847 mas = wr_mas->mas; in mas_wr_spanning_store()
3851 return mas_new_root(mas, wr_mas->entry); in mas_wr_spanning_store()
3879 if (!wr_mas->entry) { in mas_wr_spanning_store()
3889 return mas_new_root(mas, wr_mas->entry); in mas_wr_spanning_store()
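
A spanning store clones the write into a left and a right state over the same entry (lines 3832-3833), walks each boundary down with mas_wr_walk_index() (lines 3648-3656 above), and rebuilds the covered subtree from the two edges inward. The fragments also show the two short-circuits to mas_new_root(): at line 3851 when the span covers the entire tree, and at line 3889 on the NULL-store path. A sketch of the setup only:

	/* One write state per side of the span, both storing the same entry. */
	MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);	/* walked to mas->last */
	MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);	/* walked to mas->index */
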
3920 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas, in mas_wr_node_store() argument
3923 struct ma_state *mas = wr_mas->mas; in mas_wr_node_store()
3926 unsigned char dst_offset, offset_end = wr_mas->offset_end; in mas_wr_node_store()
3928 unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type]; in mas_wr_node_store()
3932 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) && in mas_wr_node_store()
3936 if (mas->last == wr_mas->end_piv) in mas_wr_node_store()
3938 else if (unlikely(wr_mas->r_max == ULONG_MAX)) in mas_wr_node_store()
3939 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type); in mas_wr_node_store()
3954 dst_pivots = ma_pivots(newnode, wr_mas->type); in mas_wr_node_store()
3955 dst_slots = ma_slots(newnode, wr_mas->type); in mas_wr_node_store()
3957 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset); in mas_wr_node_store()
3958 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset); in mas_wr_node_store()
3961 if (wr_mas->r_min < mas->index) { in mas_wr_node_store()
3962 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content); in mas_wr_node_store()
3969 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry); in mas_wr_node_store()
3975 if (offset_end > wr_mas->node_end) in mas_wr_node_store()
3980 copy_size = wr_mas->node_end - offset_end + 1; in mas_wr_node_store()
3981 memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end, in mas_wr_node_store()
3983 memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end, in mas_wr_node_store()
3994 mas->node = mt_mk_node(newnode, wr_mas->type); in mas_wr_node_store()
3997 memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); in mas_wr_node_store()
3999 trace_ma_write(__func__, mas, 0, wr_mas->entry); in mas_wr_node_store()
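
Lines 3954-3997 show mas_wr_node_store() rebuilding the leaf by copy rather than in place. The sequence, read off the fragments:

	/* Copy plan for the rebuilt leaf (reconstruction, not verbatim):
	 *
	 *   head:  slots/pivots [0 .. mas->offset)  copied unchanged (3957-3958)
	 *   write: the surviving head of the old range when r_min < mas->index
	 *          (3961-3962), then the new entry ending at mas->last (3969)
	 *   tail:  slots/pivots [offset_end .. node_end] copied unchanged
	 *          (3980-3983), skipped when the write runs off the node end
	 *
	 * The result is published with mt_mk_node() (3994), or memcpy'd over
	 * the old node (3997) when no RCU reader can be walking it. */
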
4010 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) in mas_wr_slot_store() argument
4012 struct ma_state *mas = wr_mas->mas; in mas_wr_slot_store()
4014 void __rcu **slots = wr_mas->slots; in mas_wr_slot_store()
4020 if (wr_mas->offset_end - offset == 1) { in mas_wr_slot_store()
4021 if (mas->index == wr_mas->r_min) { in mas_wr_slot_store()
4023 rcu_assign_pointer(slots[offset], wr_mas->entry); in mas_wr_slot_store()
4024 wr_mas->pivots[offset] = mas->last; in mas_wr_slot_store()
4027 rcu_assign_pointer(slots[offset + 1], wr_mas->entry); in mas_wr_slot_store()
4028 wr_mas->pivots[offset] = mas->index - 1; in mas_wr_slot_store()
4037 rcu_assign_pointer(slots[offset + 1], wr_mas->entry); in mas_wr_slot_store()
4038 wr_mas->pivots[offset] = mas->index - 1; in mas_wr_slot_store()
4039 wr_mas->pivots[offset + 1] = mas->last; in mas_wr_slot_store()
4045 trace_ma_write(__func__, mas, 0, wr_mas->entry); in mas_wr_slot_store()
4050 if (!wr_mas->entry || gap) in mas_wr_slot_store()
4056 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) in mas_wr_extend_null() argument
4058 struct ma_state *mas = wr_mas->mas; in mas_wr_extend_null()
4060 if (!wr_mas->slots[wr_mas->offset_end]) { in mas_wr_extend_null()
4062 mas->last = wr_mas->end_piv; in mas_wr_extend_null()
4065 if ((mas->last == wr_mas->end_piv) && in mas_wr_extend_null()
4066 (wr_mas->node_end != wr_mas->offset_end) && in mas_wr_extend_null()
4067 !wr_mas->slots[wr_mas->offset_end + 1]) { in mas_wr_extend_null()
4068 wr_mas->offset_end++; in mas_wr_extend_null()
4069 if (wr_mas->offset_end == wr_mas->node_end) in mas_wr_extend_null()
4072 mas->last = wr_mas->pivots[wr_mas->offset_end]; in mas_wr_extend_null()
4073 wr_mas->end_piv = mas->last; in mas_wr_extend_null()
4077 if (!wr_mas->content) { in mas_wr_extend_null()
4079 mas->index = wr_mas->r_min; in mas_wr_extend_null()
4082 if (mas->index == wr_mas->r_min && mas->offset && in mas_wr_extend_null()
4083 !wr_mas->slots[mas->offset - 1]) { in mas_wr_extend_null()
4085 wr_mas->r_min = mas->index = in mas_wr_extend_null()
4086 mas_safe_min(mas, wr_mas->pivots, mas->offset); in mas_wr_extend_null()
4087 wr_mas->r_max = wr_mas->pivots[mas->offset]; in mas_wr_extend_null()
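
mas_wr_extend_null() keeps NULL ranges coalesced: a NULL store absorbs any NULL slots touching either end of the write. Worked through on hypothetical ranges, using the branches visible above (the offset decrement before line 4085 is elided from the listing):

	/* Storing NULL over [20-29] in a leaf holding
	 *     [0-9]=A, [10-19]=NULL, [20-29]=B, [30-39]=NULL, [40-49]=C
	 *
	 * forward (4065-4073): last == end_piv and slots[offset_end + 1] is
	 *     NULL, so offset_end++ and mas->last = pivots[offset_end] = 39;
	 * backward (4082-4086): index == r_min and slots[offset - 1] is NULL,
	 *     so mas->index drops to mas_safe_min() = 10.
	 *
	 * Net effect: one NULL range [10-39] instead of three adjacent ones. */
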
4092 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) in mas_wr_end_piv() argument
4094 while ((wr_mas->offset_end < wr_mas->node_end) && in mas_wr_end_piv()
4095 (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end])) in mas_wr_end_piv()
4096 wr_mas->offset_end++; in mas_wr_end_piv()
4098 if (wr_mas->offset_end < wr_mas->node_end) in mas_wr_end_piv()
4099 wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end]; in mas_wr_end_piv()
4101 wr_mas->end_piv = wr_mas->mas->max; in mas_wr_end_piv()
4103 if (!wr_mas->entry) in mas_wr_end_piv()
4104 mas_wr_extend_null(wr_mas); in mas_wr_end_piv()
4107 static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas) in mas_wr_new_end() argument
4109 struct ma_state *mas = wr_mas->mas; in mas_wr_new_end()
4110 unsigned char new_end = wr_mas->node_end + 2; in mas_wr_new_end()
4112 new_end -= wr_mas->offset_end - mas->offset; in mas_wr_new_end()
4113 if (wr_mas->r_min == mas->index) in mas_wr_new_end()
4116 if (wr_mas->end_piv == mas->last) in mas_wr_new_end()
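
mas_wr_new_end() predicts the slot count after the store: start from node_end + 2 (a store can cut one range into at most three pieces, adding two slots), subtract every slot the new range fully covers, then one more for each end of the write that lands exactly on an existing boundary. The two decrements are elided from the listing, so this worked example assumes they follow lines 4113 and 4116:

	/* Node holds [0-9], [10-19], [20-29]  =>  node_end = 2.
	 * Store over [12-19]: offset = offset_end = 1.
	 *
	 *   new_end  = node_end + 2;               // 4
	 *   new_end -= offset_end - mas->offset;   // still 4, nothing covered
	 *   if (r_min == mas->index)  new_end--;   // 10 != 12, no change
	 *   if (end_piv == mas->last) new_end--;   // 19 == 19  =>  3
	 *
	 * Result: [0-9], [10-11], [12-19], [20-29]  =>  new_end = 3.  Correct:
	 * the old [10-19] splits into [10-11] plus the new entry. */
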
4133 static inline bool mas_wr_append(struct ma_wr_state *wr_mas, in mas_wr_append() argument
4140 mas = wr_mas->mas; in mas_wr_append()
4144 if (mas->offset != wr_mas->node_end) in mas_wr_append()
4147 end = wr_mas->node_end; in mas_wr_append()
4151 if (new_end < mt_pivots[wr_mas->type]) { in mas_wr_append()
4152 wr_mas->pivots[new_end] = wr_mas->pivots[end]; in mas_wr_append()
4153 ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end); in mas_wr_append()
4156 slots = wr_mas->slots; in mas_wr_append()
4158 if (mas->last == wr_mas->r_max) { in mas_wr_append()
4160 rcu_assign_pointer(slots[new_end], wr_mas->entry); in mas_wr_append()
4161 wr_mas->pivots[end] = mas->index - 1; in mas_wr_append()
4165 rcu_assign_pointer(slots[new_end], wr_mas->content); in mas_wr_append()
4166 wr_mas->pivots[end] = mas->last; in mas_wr_append()
4167 rcu_assign_pointer(slots[end], wr_mas->entry); in mas_wr_append()
4171 rcu_assign_pointer(slots[new_end], wr_mas->content); in mas_wr_append()
4172 wr_mas->pivots[end + 1] = mas->last; in mas_wr_append()
4173 rcu_assign_pointer(slots[end + 1], wr_mas->entry); in mas_wr_append()
4174 wr_mas->pivots[end] = mas->index - 1; in mas_wr_append()
4178 if (!wr_mas->content || !wr_mas->entry) in mas_wr_append()
4181 trace_ma_write(__func__, mas, new_end, wr_mas->entry); in mas_wr_append()
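
mas_wr_append() only fires when the write lands in the node's last slot (line 4144). The three branches above produce three layouts; reconstructed, with content the old value over [r_min, r_max] and entry the new value over [index, last]:

	/* last == r_max, tail overwrite (4158-4161):
	 *     [r_min .. index-1] = content,  [index .. r_max] = entry
	 *
	 * index == r_min, head overwrite, one slot added (4165-4167):
	 *     [r_min .. last] = entry,       [last+1 .. r_max] = content
	 *
	 * strictly inside, two slots added (4171-4177):
	 *     [r_min .. index-1] = content,  [index .. last] = entry,
	 *     [last+1 .. r_max] = content
	 */
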
4191 static void mas_wr_bnode(struct ma_wr_state *wr_mas) in mas_wr_bnode() argument
4195 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry); in mas_wr_bnode()
4197 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end); in mas_wr_bnode()
4198 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end); in mas_wr_bnode()
4201 static inline void mas_wr_modify(struct ma_wr_state *wr_mas) in mas_wr_modify() argument
4203 struct ma_state *mas = wr_mas->mas; in mas_wr_modify()
4207 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) { in mas_wr_modify()
4208 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry); in mas_wr_modify()
4209 if (!!wr_mas->entry ^ !!wr_mas->content) in mas_wr_modify()
4218 new_end = mas_wr_new_end(wr_mas); in mas_wr_modify()
4219 if (new_end >= mt_slots[wr_mas->type]) in mas_wr_modify()
4223 if (mas_wr_append(wr_mas, new_end)) in mas_wr_modify()
4226 if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas)) in mas_wr_modify()
4229 if (mas_wr_node_store(wr_mas, new_end)) in mas_wr_modify()
4236 mas_wr_bnode(wr_mas); in mas_wr_modify()
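
Lines 4203-4236 are the write fast-path ladder; every branch of it is visible above. As one function-shaped sketch (gap bookkeeping elided):

	static inline void mas_wr_modify_sketch(struct ma_wr_state *wr_mas)
	{
		struct ma_state *mas = wr_mas->mas;
		unsigned char new_end;

		/* 1. Exact fit: swap one slot in place, pivots untouched. */
		if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
			rcu_assign_pointer(wr_mas->slots[mas->offset],
					   wr_mas->entry);
			return;
		}

		/* 2. Anything that would overflow goes to the big-node path. */
		new_end = mas_wr_new_end(wr_mas);
		if (new_end >= mt_slots[wr_mas->type])
			goto slow_path;

		/* 3. In-place variants, cheapest first. */
		if (mas_wr_append(wr_mas, new_end))
			return;
		if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
			return;
		if (mas_wr_node_store(wr_mas, new_end))
			return;

	slow_path:
		mas_wr_bnode(wr_mas);	/* rebuild via big node: may split/rebalance */
	}
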
4246 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas) in mas_wr_store_entry() argument
4248 struct ma_state *mas = wr_mas->mas; in mas_wr_store_entry()
4250 wr_mas->content = mas_start(mas); in mas_wr_store_entry()
4252 mas_store_root(mas, wr_mas->entry); in mas_wr_store_entry()
4253 return wr_mas->content; in mas_wr_store_entry()
4256 if (unlikely(!mas_wr_walk(wr_mas))) { in mas_wr_store_entry()
4257 mas_wr_spanning_store(wr_mas); in mas_wr_store_entry()
4258 return wr_mas->content; in mas_wr_store_entry()
4262 mas_wr_end_piv(wr_mas); in mas_wr_store_entry()
4265 mas_new_root(mas, wr_mas->entry); in mas_wr_store_entry()
4266 return wr_mas->content; in mas_wr_store_entry()
4269 mas_wr_modify(wr_mas); in mas_wr_store_entry()
4270 return wr_mas->content; in mas_wr_store_entry()
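
mas_wr_store_entry() is the dispatcher the fragments trace end to end: start the walk, handle the degenerate trees, then pick spanning store or in-node modify. Reassembled, with the two conditions that do not reference wr_mas marked as assumptions:

	wr_mas->content = mas_start(mas);
	if (mas_is_none(mas) || mas_is_ptr(mas)) {	/* (assumed) empty/root-only */
		mas_store_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	if (unlikely(!mas_wr_walk(wr_mas))) {		/* spans several nodes */
		mas_wr_spanning_store(wr_mas);
		return wr_mas->content;
	}

	/* At the target leaf: fix up where the write ends, then modify. */
	mas_wr_end_piv(wr_mas);
	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {	/* (assumed) */
		mas_new_root(mas, wr_mas->entry);	/* write covers everything */
		return wr_mas->content;
	}

	mas_wr_modify(wr_mas);
	return wr_mas->content;
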
4283 MA_WR_STATE(wr_mas, mas, entry); in mas_insert()
4299 wr_mas.content = mas_start(mas); in mas_insert()
4300 if (wr_mas.content) in mas_insert()
4309 if (!mas_wr_walk(&wr_mas)) in mas_insert()
4313 wr_mas.offset_end = mas->offset; in mas_insert()
4314 wr_mas.end_piv = wr_mas.r_max; in mas_insert()
4316 if (wr_mas.content || (mas->last > wr_mas.r_max)) in mas_insert()
4322 mas_wr_modify(&wr_mas); in mas_insert()
4323 return wr_mas.content; in mas_insert()
4327 return wr_mas.content; in mas_insert()
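
The pair of tests at line 4316 is the whole occupancy check for mas_insert():

	/* Occupancy test at line 4316 (reconstruction of the reasoning):
	 *   wr_mas.content != NULL     => mas->index is already occupied
	 *   mas->last > wr_mas.r_max   => the range overruns this slot, so it
	 *                                 overlaps at least one more entry
	 * Either way the insert bails out; the usual contract surfaces this
	 * to callers as -EEXIST (how it is set is not visible in the listing). */
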
5340 static void mas_wr_store_setup(struct ma_wr_state *wr_mas) in mas_wr_store_setup() argument
5342 if (!mas_is_active(wr_mas->mas)) { in mas_wr_store_setup()
5343 if (mas_is_start(wr_mas->mas)) in mas_wr_store_setup()
5346 if (unlikely(mas_is_paused(wr_mas->mas))) in mas_wr_store_setup()
5349 if (unlikely(mas_is_none(wr_mas->mas))) in mas_wr_store_setup()
5352 if (unlikely(mas_is_overflow(wr_mas->mas))) in mas_wr_store_setup()
5355 if (unlikely(mas_is_underflow(wr_mas->mas))) in mas_wr_store_setup()
5364 if (wr_mas->mas->last > wr_mas->mas->max) in mas_wr_store_setup()
5367 if (wr_mas->entry) in mas_wr_store_setup()
5370 if (mte_is_leaf(wr_mas->mas->node) && in mas_wr_store_setup()
5371 wr_mas->mas->last == wr_mas->mas->max) in mas_wr_store_setup()
5377 mas_reset(wr_mas->mas); in mas_wr_store_setup()
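
mas_wr_store_setup() decides whether a store can resume from wherever the iterator already points. Summarized from lines 5342-5377 (the reset targets are not visible in the listing, so the goto structure is assumed):

	/* start                        -> fresh state, nothing to check
	 * paused/none/over-/underflow -> stale position: reset, walk from root
	 * mas->last > mas->max        -> write leaves the current node: reset
	 * storing non-NULL            -> current position is usable as-is
	 * NULL store at a leaf with last == max -> reset, since a NULL store
	 *     may need to reach the next node to coalesce (see 4056 above). */
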
5395 MA_WR_STATE(wr_mas, mas, entry); in mas_store()
5415 mas_wr_store_setup(&wr_mas); in mas_store()
5416 mas_wr_store_entry(&wr_mas); in mas_store()
5417 return wr_mas.content; in mas_store()
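
Lines 5415-5417 are the canonical two-call sequence (setup, then store) that every entry point below repeats. From the caller's side the machinery is hidden behind mas_store(); a minimal usage sketch with a hypothetical tree and helper name, the caller holding the tree lock:

	#include <linux/maple_tree.h>

	static DEFINE_MTREE(tree);		/* hypothetical example tree */

	static void *example_store(unsigned long first, unsigned long last,
				   void *entry)
	{
		void *old;

		MA_STATE(mas, &tree, first, last);

		mas_lock(&mas);
		old = mas_store(&mas, entry);	/* setup + store_entry inside */
		mas_unlock(&mas);

		return old;	/* wr_mas.content: the first overwritten entry */
	}

Allocation failures are left to the caller here; mas_store_gfp() (line 5432) is the variant that handles allocation with the given gfp flags.
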
5432 MA_WR_STATE(wr_mas, mas, entry); in mas_store_gfp()
5434 mas_wr_store_setup(&wr_mas); in mas_store_gfp()
5437 mas_wr_store_entry(&wr_mas); in mas_store_gfp()
5456 MA_WR_STATE(wr_mas, mas, entry); in mas_store_prealloc()
5458 mas_wr_store_setup(&wr_mas); in mas_store_prealloc()
5460 mas_wr_store_entry(&wr_mas); in mas_store_prealloc()
5461 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); in mas_store_prealloc()
5476 MA_WR_STATE(wr_mas, mas, entry); in mas_preallocate()
5485 mas_wr_store_setup(&wr_mas); in mas_preallocate()
5486 wr_mas.content = mas_start(mas); in mas_preallocate()
5491 if (unlikely(!mas_wr_walk(&wr_mas))) { in mas_preallocate()
5499 if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last) in mas_preallocate()
5502 mas_wr_end_piv(&wr_mas); in mas_preallocate()
5503 node_size = mas_wr_new_end(&wr_mas); in mas_preallocate()
5504 if (node_size >= mt_slots[wr_mas.type]) { in mas_preallocate()
5515 if (node_size - 1 <= mt_min_slots[wr_mas.type]) in mas_preallocate()
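
mas_preallocate() (lines 5476-5515) runs the same walk as a real store just to count the nodes the store would need, so a later mas_store_prealloc() cannot fail; the MAS_WR_BUG_ON at line 5461 is its backstop. The usual two-phase pattern, sketched with a hypothetical helper name and locking elided:

	/* Phase 1 may sleep or fail; phase 2 can do neither. */
	static int example_two_phase(struct ma_state *mas, void *entry)
	{
		int ret;

		ret = mas_preallocate(mas, entry, GFP_KERNEL);
		if (ret)
			return ret;	/* -ENOMEM: the tree is untouched */

		mas_store_prealloc(mas, entry);	/* consumes the preallocation */
		return 0;
	}
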
6162 MA_WR_STATE(wr_mas, mas, NULL); in mas_erase()
6175 mas_wr_store_setup(&wr_mas); in mas_erase()
6176 mas_wr_store_entry(&wr_mas); in mas_erase()
6274 MA_WR_STATE(wr_mas, &mas, entry); in mtree_store_range()
6285 mas_wr_store_entry(&wr_mas); in mtree_store_range()
7247 void mas_wr_dump(const struct ma_wr_state *wr_mas) in mas_wr_dump() argument
7250 wr_mas->node, wr_mas->r_min, wr_mas->r_max); in mas_wr_dump()
7252 wr_mas->type, wr_mas->offset_end, wr_mas->node_end, in mas_wr_dump()
7253 wr_mas->end_piv); in mas_wr_dump()
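
The two pr_err() calls at 7250-7253 between them print every field of the write state. Filled out, the debug helper is roughly the following; the format strings are an assumption from memory, only the argument lists above are verbatim:

	void mas_wr_dump(const struct ma_wr_state *wr_mas)
	{
		pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
		       wr_mas->node, wr_mas->r_min, wr_mas->r_max);
		pr_err("        type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
		       wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
		       wr_mas->end_piv);
	}
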