// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

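/**
 * vmw_bo_vm_lookup - Look up the TTM buffer object backing a mmap offset.
 * @bdev: Pointer to the TTM device.
 * @offset: Page offset of the mapping (vma->vm_pgoff).
 * @pages: Number of pages the mapping spans.
 *
 * Looks up the buffer object covering the given range in the device's vma
 * offset manager and takes a reference on it, unless its refcount has
 * already dropped to zero.
 *
 * Return: A referenced buffer object on success, NULL if none was found.
 */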
static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
	struct drm_device *drm = &dev_priv->drm;
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object,
				  base.vma_node);
		bo = ttm_bo_get_unless_zero(bo);
	}

	drm_vma_offset_unlock_lookup(bdev->vma_manager);

	if (!bo)
		drm_err(drm, "Could not find buffer object to map\n");

	return bo;
}

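/**
 * vmw_mmap - mmap file operation for the vmwgfx device node.
 * @filp: Pointer to the struct file being mapped.
 * @vma: Pointer to the vma describing the requested mapping.
 *
 * Looks up the buffer object at @vma->vm_pgoff, verifies that the caller
 * may access it, sets up the mapping through ttm_bo_mmap_obj() and installs
 * the vmwgfx-specific vm operations.
 *
 * Return: Zero on success, negative error code on failure.
 */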
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
	static const struct vm_operations_struct vmw_vm_ops = {
		.pfn_mkwrite = vmw_bo_vm_mkwrite,
		.page_mkwrite = vmw_bo_vm_mkwrite,
		.fault = vmw_bo_vm_fault,
		.open = ttm_bo_vm_open,
		.close = ttm_bo_vm_close,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		.huge_fault = vmw_bo_vm_huge_fault,
#endif
	};
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_device *bdev = &dev_priv->bdev;
	struct ttm_buffer_object *bo;
	int ret;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
		return -EINVAL;

	bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	ret = vmw_user_bo_verify_access(bo, tfile);
	if (unlikely(ret != 0))
		goto out_unref;

	ret = ttm_bo_mmap_obj(vma, bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &vmw_vm_ops;

	/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
	if (!is_cow_mapping(vma->vm_flags))
		vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;

	ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */

	return 0;

out_unref:
	ttm_bo_put(bo);
	return ret;
}

/* struct vmw_validation_mem callback */
static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
{
	static struct ttm_operation_ctx ctx = {.interruptible = false,
					       .no_wait_gpu = false};
	struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);

	return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
}

/* struct vmw_validation_mem callback */
static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
{
	struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);

	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_validation_mem_init_ttm - Interface the validation memory tracker
 * to ttm.
 * @dev_priv: Pointer to struct vmw_private. The reason we take a struct
 * vmw_private rather than a struct vmw_validation_mem is to make sure the
 * callbacks' assumption that struct vmw_private embeds a struct
 * vmw_validation_mem holds true.
 * @gran: The recommended allocation granularity
 */
void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
{
	struct vmw_validation_mem *vvm = &dev_priv->vvm;

	vvm->reserve_mem = vmw_vmt_reserve;
	vvm->unreserve_mem = vmw_vmt_unreserve;
	vvm->gran = gran;
}
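
/*
 * Illustrative usage sketch (not part of this file): a driver init path
 * would typically hook the tracker up once, e.g.
 *
 *	vmw_validation_mem_init_ttm(dev_priv, 16 * PAGE_SIZE);
 *
 * after which validation code pointing at &dev_priv->vvm has its metadata
 * allocations accounted through the reserve/unreserve callbacks above.
 * The granularity shown is an arbitrary example value, not necessarily
 * the one the driver actually passes.
 */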