// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 - 2017 Intel Corporation.
 */
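/*
 * INTERVAL_TREE_DEFINE() generates the static interval-tree helpers used
 * throughout this file: __mmu_int_rb_insert(), __mmu_int_rb_remove(),
 * __mmu_int_rb_iter_first() and __mmu_int_rb_iter_next().  Nodes are keyed
 * on inclusive, page-granular ranges: mmu_node_start() rounds the start
 * down to a page boundary and mmu_node_last() returns the last byte of the
 * page-aligned end.  (The mn_opts notifier-ops table referenced in
 * hfi1_mmu_rb_register() is elided here; it points .invalidate_range_start
 * at mmu_notifier_range_start() below.)
 */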
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);
static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}
static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}
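/*
 * hfi1_mmu_rb_register() builds a handler: an interval tree of registered
 * ranges plus an MMU notifier bound to the caller's mm.  Removals triggered
 * by invalidation are deferred to the supplied workqueue via
 * del_work/del_list (see handle_remove() at the bottom of the file).
 */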
int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_handler_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *h;
	int ret;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->root = RB_ROOT_CACHED;
	h->ops = ops;
	h->ops_arg = ops_arg;
	INIT_HLIST_NODE(&h->mn.hlist);
	spin_lock_init(&h->lock);
	h->mn.ops = &mn_opts;
	INIT_WORK(&h->del_work, handle_remove);
	INIT_LIST_HEAD(&h->del_list);
	INIT_LIST_HEAD(&h->lru_list);
	h->wq = wq;

	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
		kfree(h);
		return ret;
	}

	*handler = h;
	return 0;
}
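/*
 * Teardown order matters: unregister the notifier first so no new
 * invalidations arrive, flush the deferred-remove work, then drain the
 * tree under the lock and run the remove callbacks outside it.
 */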
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	struct list_head del_list;
	unsigned long flags;

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mn.mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	kfree(handler);
}
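/*
 * Insertion refuses ranges that overlap an existing node (-EINVAL) and
 * rejects callers whose mm does not match the one the notifier was
 * registered on (-EPERM).  New nodes go on the head of the LRU list.
 */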
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);

	if (current->mm != handler->mn.mm)
		return -EPERM;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}
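/*
 * __mmu_rb_search() must be called with handler->lock held.  Without a
 * filter op it returns the first node overlapping [addr, addr + len - 1];
 * with one, it walks every overlapping node until the filter accepts one.
 */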
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}
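/*
 * "Remove unless exact": look up a candidate node and, unless it matches
 * the requested range exactly, pull it out of the tree and the LRU so the
 * caller can rebuild it.  Returns true when a node was removed; the node
 * found (exact or not) is handed back through *rb_node.
 */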
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len,
				     struct mmu_rb_node **rb_node)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	bool ret = false;

	if (current->mm != handler->mn.mm)
		return ret;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
		if (node->addr == addr && node->len == len)
			goto unlock;
		__mmu_int_rb_remove(node, &handler->root);
		list_del(&node->list); /* remove from LRU list */
		ret = true;
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	*rb_node = node;
	return ret;
}
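/*
 * Eviction walks the LRU list from its tail (the coldest nodes, since
 * inserts go on the head) and lets the client's evict op decide both which
 * nodes to drop and when to stop.  Remove callbacks again run only after
 * the lock is released.
 */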
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	if (current->mm != handler->mn.mm)
		return;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
					 list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node,
					  list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}
/*
 * It is up to the caller to ensure that this function does not race with
 * the mmu invalidate notifier which may be calling the users remove
 * callback on 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	if (current->mm != handler->mn.mm)
		return;

	/* Validity of handler and node pointers has been checked by caller. */
	trace_hfi1_mmu_rb_remove(node->addr, node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	list_del(&node->list); /* remove from LRU list */
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node);
}
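/*
 * MMU notifier callback, invoked when the kernel is about to invalidate a
 * virtual address range.  Affected nodes whose invalidate op asks for
 * removal are unlinked here under the lock, but their remove callbacks are
 * deferred to the workqueue so they can sleep and take mmap_lock if needed
 * (see handle_remove() below).
 */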
static int mmu_notifier_range_start(struct mmu_notifier *mn,
				    const struct mmu_notifier_range *range)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, range->start,
					     range->end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);

	return 0;
}
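/*
 * Drain a delete list, invoking the client's remove op for each node.
 * Callers extract the list from the handler first and must not hold
 * handler->lock here, since the remove callbacks may sleep.
 */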
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}
/*
 * Work queue function to remove all nodes that have been queued up to be
 * removed.  The key feature is that mm->mmap_lock is not being held and
 * the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						      struct mmu_rb_handler,
						      del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}
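/*
 * A minimal usage sketch, not part of this file: the demo_* names below are
 * hypothetical, and the callback signatures are inferred from the call
 * sites above (ops->filter/insert/remove/invalidate/evict).  A real client
 * embeds struct mmu_rb_node in its own cache entry and does the actual work
 * (page pinning, DMA setup) in these hooks.
 */
static bool demo_filter(struct mmu_rb_node *node, unsigned long addr,
			unsigned long len)
{
	/* Accept only nodes that fully cover the requested range. */
	return node->addr <= addr &&
	       node->addr + node->len >= addr + len;
}

static int demo_insert(void *ops_arg, struct mmu_rb_node *mnode)
{
	return 0;	/* e.g. pin pages and program DMA here */
}

static void demo_remove(void *ops_arg, struct mmu_rb_node *mnode)
{
	/* e.g. unpin pages; may sleep, since this runs from the wq */
}

static int demo_invalidate(void *ops_arg, struct mmu_rb_node *mnode)
{
	return 1;	/* nonzero: unlink the node on invalidation */
}

static int demo_evict(void *ops_arg, struct mmu_rb_node *mnode,
		      void *evict_arg, bool *stop)
{
	return 1;	/* evict every candidate; never sets *stop */
}

static struct mmu_rb_handler_ops demo_ops = {
	.filter		= demo_filter,
	.insert		= demo_insert,
	.remove		= demo_remove,
	.invalidate	= demo_invalidate,
	.evict		= demo_evict,
};

/* Must be called from the process context whose mm is to be tracked. */
static int demo_register(struct workqueue_struct *demo_wq,
			 struct mmu_rb_handler **out)
{
	return hfi1_mmu_rb_register(NULL, &demo_ops, demo_wq, out);
}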