drivers/gpu/drm/drm_vma_manager.c: lines matching +full:software +full:- +full:based
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
39 * The vma-manager is responsible for mapping arbitrary driver-dependent memory
40 * regions into the linear user address-space. It provides offsets to the
41 * caller which can then be used on the address_space of the drm-device. It
43 * confuse mm-core by inconsistent fake vm_pgoff fields.
45 * only be used to manage mappings into linear user-space VMs.
48 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
52 * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
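As an editorial aside to the excerpted lines above: the manager and its nodes are meant to be embedded in driver structures. A minimal sketch of that embedding, assuming hypothetical driver types my_device and my_buffer (not part of this file):

#include <drm/drm_vma_manager.h>

/* Hypothetical driver structures, shown only to illustrate the embedding. */
struct my_device {
	struct drm_vma_offset_manager vma_manager;	/* one linear offset space per device */
};

struct my_buffer {
	struct drm_vma_offset_node vma_node;		/* one node per mappable object */
	unsigned long num_pages;			/* object size, in pages */
};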
55 * This offset manager works on page-based addresses. That is, every argument
58 * must always be page-aligned (as usual).
59 * If you want to get a valid byte-based user-space address for a given offset,
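To make the page/byte distinction concrete, here is a short sketch using the inline helpers from <drm/drm_vma_manager.h>; my_buffer_mmap_offset() is a hypothetical helper name:

#include <drm/drm_vma_manager.h>

/* Hypothetical helper: report the mmap offset of an already-added node. */
static u64 my_buffer_mmap_offset(struct drm_vma_offset_node *node)
{
	/*
	 * drm_vma_node_start() is page-based; drm_vma_node_offset_addr()
	 * is the same value shifted by PAGE_SHIFT into a byte address,
	 * which is what user-space passes to mmap().
	 */
	return drm_vma_node_offset_addr(node);
}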
63 * management. For every open-file context that is allowed to access a given
65 * open-file with the offset of the node will fail with -EACCES. To revoke
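The access management described here reduces to a small per-client lifecycle. A hedged sketch, assuming the tag is the client's struct drm_file (as in current kernels) and that my_buffer_open()/my_buffer_close() are hypothetical driver hooks:

#include <drm/drm_file.h>
#include <drm/drm_vma_manager.h>

/* Called when a client gains access to the object (hypothetical hook). */
static int my_buffer_open(struct drm_vma_offset_node *node, struct drm_file *file)
{
	/* without this, mmap() on the node's offset fails with -EACCES */
	return drm_vma_node_allow(node, file);
}

/* Called when the client drops the object again (hypothetical hook). */
static void my_buffer_close(struct drm_vma_offset_node *node, struct drm_file *file)
{
	drm_vma_node_revoke(node, file);
}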
71 * drm_vma_offset_manager_init - Initialize new offset-manager
73 * @page_offset: Offset of available memory area (page-based)
74 * @size: Size of available address space range (page-based)
76 * Initialize a new offset-manager. The offset and area size available for the
78 * page-numbers, not bytes.
82 * for the caller. While calling into the vma-manager, a given node must
88 rwlock_init(&mgr->vm_lock);
89 drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
94 * drm_vma_offset_manager_destroy() - Destroy offset manager
106 drm_mm_takedown(&mgr->vm_addr_space_mm);
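Taken together, drm_vma_offset_manager_init() and drm_vma_offset_manager_destroy() bracket the manager's lifetime. A minimal load/unload sketch; the extent constants and helper names below are placeholders, not values this file prescribes:

#include <drm/drm_vma_manager.h>

/* Placeholder extent of the fake-offset space, in pages. */
#define MY_OFFSET_START	0x10000000UL
#define MY_OFFSET_SIZE	0x10000000UL

static void my_vma_space_init(struct drm_vma_offset_manager *mgr)
{
	drm_vma_offset_manager_init(mgr, MY_OFFSET_START, MY_OFFSET_SIZE);
}

static void my_vma_space_fini(struct drm_vma_offset_manager *mgr)
{
	/* every node must already have been removed via drm_vma_offset_remove() */
	drm_vma_offset_manager_destroy(mgr);
}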
111 * drm_vma_offset_lookup_locked() - Find node in offset space
113 * @start: Start address for object (page-based)
114 * @pages: Size of object (page-based)
148 iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
153 offset = node->start;
155 iter = iter->rb_right;
160 iter = iter->rb_left;
166 offset = best->start + best->size;
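A typical consumer of this lookup is a driver mmap path. The sketch below assumes a hypothetical struct my_buffer embedding the node and uses the exact-match wrapper drm_vma_offset_exact_lookup_locked() from the header; vm_pgoff is already page-based, which is exactly what this API expects:

#include <linux/mm.h>
#include <drm/drm_vma_manager.h>

/* Hypothetical object wrapping an offset node. */
struct my_buffer {
	struct drm_vma_offset_node vma_node;
	/* ... driver-specific fields ... */
};

static struct my_buffer *my_lookup_buffer(struct drm_vma_offset_manager *mgr,
					  struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct my_buffer *buf = NULL;

	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_exact_lookup_locked(mgr, vma->vm_pgoff,
						  vma_pages(vma));
	if (node)
		buf = container_of(node, struct my_buffer, vma_node);
	/*
	 * A real driver takes its own reference on buf here, before the
	 * lookup lock is dropped, to keep the object alive.
	 */
	drm_vma_offset_unlock_lookup(mgr);

	return buf;
}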
179 * drm_vma_offset_add() - Add offset node to manager
182 * @pages: Allocation size visible to user-space (in number of pages)
184 * Add a node to the offset-manager. If the node was already added, this does
195 * that you want to map. It only limits the size that user-space can map into
206 write_lock(&mgr->vm_lock);
208 if (!drm_mm_node_allocated(&node->vm_node))
209 ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
210 &node->vm_node, pages);
212 write_unlock(&mgr->vm_lock);
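A common pattern around drm_vma_offset_add() is to publish the resulting byte-based offset to user-space. A sketch with a hypothetical helper:

#include <drm/drm_vma_manager.h>

static int my_buffer_publish_offset(struct drm_vma_offset_manager *mgr,
				    struct drm_vma_offset_node *node,
				    unsigned long num_pages, u64 *offset)
{
	int ret;

	/* a second call on an already-added node is a no-op that returns 0 */
	ret = drm_vma_offset_add(mgr, node, num_pages);
	if (ret)
		return ret;

	*offset = drm_vma_node_offset_addr(node);	/* byte-based, for mmap() */
	return 0;
}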
219 * drm_vma_offset_remove() - Remove offset node from manager
232 write_lock(&mgr->vm_lock);
234 if (drm_mm_node_allocated(&node->vm_node)) {
235 drm_mm_remove_node(&node->vm_node);
236 memset(&node->vm_node, 0, sizeof(node->vm_node));
239 write_unlock(&mgr->vm_lock);
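On object teardown, drivers usually zap any live user-space mappings before removing the node. A sketch; my_buffer_unpublish() is hypothetical, and drm_vma_node_unmap() is the header helper for invalidating existing mappings:

#include <drm/drm_vma_manager.h>

static void my_buffer_unpublish(struct drm_vma_offset_manager *mgr,
				struct drm_vma_offset_node *node,
				struct address_space *dev_mapping)
{
	/* invalidate any user-space mappings that still reference the node */
	drm_vma_node_unmap(node, dev_mapping);

	/* harmless if the node was never added or was already removed */
	drm_vma_offset_remove(mgr, node);
}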
244 * drm_vma_node_allow - Add open-file to list of allowed users
248 * Add @tag to the list of allowed open-files for this node. If @tag is
249 * already on this list, the ref-count is incremented.
251 * The list of allowed-users is preserved across drm_vma_offset_add() and
253 * not added to any offset-manager.
255 * You must remove all open-files the same number of times as you added them
261 * 0 on success, negative error code on internal failure (out-of-mem)
271 * unlikely that an open-file is added twice to a single node so we
276 write_lock(&node->vm_lock);
278 iter = &node->vm_files.rb_node;
284 if (tag == entry->vm_tag) {
285 entry->vm_count++;
287 } else if (tag > entry->vm_tag) {
288 iter = &(*iter)->rb_right;
290 iter = &(*iter)->rb_left;
295 ret = -ENOMEM;
299 new->vm_tag = tag;
300 new->vm_count = 1;
301 rb_link_node(&new->vm_rb, parent, iter);
302 rb_insert_color(&new->vm_rb, &node->vm_files);
306 write_unlock(&node->vm_lock);
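Because the grant is reference-counted per tag, callers typically pair each successful drm_vma_node_allow() with a later drm_vma_node_revoke(), including on error paths. A hedged sketch with hypothetical helpers:

#include <drm/drm_file.h>
#include <drm/drm_vma_manager.h>

/* Placeholder for whatever driver-specific setup follows the grant. */
static int my_setup_rest(struct drm_vma_offset_node *node, struct drm_file *file)
{
	return 0;
}

static int my_handle_open(struct drm_vma_offset_node *node, struct drm_file *file)
{
	int ret;

	ret = drm_vma_node_allow(node, file);	/* may fail with -ENOMEM */
	if (ret)
		return ret;

	ret = my_setup_rest(node, file);
	if (ret)
		drm_vma_node_revoke(node, file);	/* unwind the grant */

	return ret;
}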
313 * drm_vma_node_revoke - Remove open-file from list of allowed users
317 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
318 * If the ref-count drops to zero, remove @tag from the list. You must call
331 write_lock(&node->vm_lock);
333 iter = node->vm_files.rb_node;
336 if (tag == entry->vm_tag) {
337 if (!--entry->vm_count) {
338 rb_erase(&entry->vm_rb, &node->vm_files);
342 } else if (tag > entry->vm_tag) {
343 iter = iter->rb_right;
345 iter = iter->rb_left;
349 write_unlock(&node->vm_lock);
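The counting semantics can be spelled out in a tiny sketch: a tag that was granted access N times must be revoked N times before it is actually dropped from the node (my_drop_all_grants() is a hypothetical helper):

#include <drm/drm_file.h>
#include <drm/drm_vma_manager.h>

static void my_drop_all_grants(struct drm_vma_offset_node *node,
			       struct drm_file *file, unsigned int grants)
{
	/* each iteration undoes one earlier drm_vma_node_allow() call */
	while (grants--)
		drm_vma_node_revoke(node, file);
}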
354 * drm_vma_node_is_allowed - Check whether an open-file is granted access
359 * open-files (see drm_vma_node_allow()).
372 read_lock(&node->vm_lock);
374 iter = node->vm_files.rb_node;
377 if (tag == entry->vm_tag)
379 else if (tag > entry->vm_tag)
380 iter = iter->rb_right;
382 iter = iter->rb_left;
385 read_unlock(&node->vm_lock);
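A driver-side access check at mmap time might look like the sketch below; the same header also provides drm_vma_node_verify_access(), which wraps this test and returns 0 or -EACCES directly:

#include <linux/errno.h>
#include <drm/drm_file.h>
#include <drm/drm_vma_manager.h>

static int my_mmap_check_access(struct drm_vma_offset_node *node,
				struct drm_file *file)
{
	if (!drm_vma_node_is_allowed(node, file))
		return -EACCES;

	return 0;
}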