// SPDX-License-Identifier: GPL-2.0-or-later

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_vram_mm_helper.h>

#include <drm/ttm/ttm_page_alloc.h>

/**
 * DOC: overview
 *
 * The data structure &struct drm_vram_mm and its helpers implement a memory
 * manager for simple framebuffer devices with dedicated video memory. Buffer
 * objects are either placed in video RAM or evicted to system memory. These
 * helper functions work well with &struct drm_gem_vram_object.
 */
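
/*
 * A minimal usage sketch: a driver typically allocates the per-device
 * instance at load time and releases it at unload time. The surrounding
 * driver code ("drv_load", "drv_unload", the pci_resource_*() arguments
 * and the "drv_vram_mm_funcs" table) is assumed here for illustration
 * only:
 *
 *	static int drv_load(struct drm_device *dev, struct pci_dev *pdev)
 *	{
 *		struct drm_vram_mm *vmm;
 *
 *		vmm = drm_vram_helper_alloc_mm(dev,
 *					       pci_resource_start(pdev, 0),
 *					       pci_resource_len(pdev, 0),
 *					       &drv_vram_mm_funcs);
 *		if (IS_ERR(vmm))
 *			return PTR_ERR(vmm);
 *		return 0;
 *	}
 *
 *	static void drv_unload(struct drm_device *dev)
 *	{
 *		drm_vram_helper_release_mm(dev);
 *	}
 */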

/*
 * TTM TT
 */

static void backend_func_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func backend_func = {
	.destroy = backend_func_destroy
};

/*
 * TTM BO device
 */

static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
					      uint32_t page_flags)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->func = &backend_func;

	ret = ttm_tt_init(tt, bo, page_flags);
	if (ret < 0)
		goto err_ttm_tt_init;

	return tt;

err_ttm_tt_init:
	kfree(tt);
	return NULL;
}

static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				   struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
{
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);

	if (vmm->funcs && vmm->funcs->evict_flags)
		vmm->funcs->evict_flags(bo, placement);
}

static int bo_driver_verify_access(struct ttm_buffer_object *bo,
				   struct file *filp)
{
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);

	if (!vmm->funcs || !vmm->funcs->verify_access)
		return 0;
	return vmm->funcs->verify_access(bo, filp);
}

static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
				    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);

	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	mem->bus.addr = NULL;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:	/* nothing to do */
		mem->bus.offset = 0;
		mem->bus.base = 0;
		mem->bus.is_iomem = false;
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = vmm->vram_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{ }

static struct ttm_bo_driver bo_driver = {
	.ttm_tt_create = bo_driver_ttm_tt_create,
	.ttm_tt_populate = ttm_pool_populate,
	.ttm_tt_unpopulate = ttm_pool_unpopulate,
	.init_mem_type = bo_driver_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = bo_driver_evict_flags,
	.verify_access = bo_driver_verify_access,
	.io_mem_reserve = bo_driver_io_mem_reserve,
	.io_mem_free = bo_driver_io_mem_free,
};

/*
 * struct drm_vram_mm
 */

/**
 * drm_vram_mm_init() - Initialize an instance of VRAM MM.
 * @vmm:	the VRAM MM instance to initialize
 * @dev:	the DRM device
 * @vram_base:	the base address of the video memory
 * @vram_size:	the size of the video memory in bytes
 * @funcs:	callback functions for buffer objects
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
		     uint64_t vram_base, size_t vram_size,
		     const struct drm_vram_mm_funcs *funcs)
{
	int ret;

	vmm->vram_base = vram_base;
	vmm->vram_size = vram_size;
	vmm->funcs = funcs;

	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
				 dev->anon_inode->i_mapping,
				 true);
	if (ret)
		return ret;

	ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(drm_vram_mm_init);

/**
 * drm_vram_mm_cleanup() - Cleans up an initialized instance of VRAM MM.
 * @vmm:	the VRAM MM instance to clean up
 */
void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
	ttm_bo_device_release(&vmm->bdev);
}
EXPORT_SYMBOL(drm_vram_mm_cleanup);
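
/*
 * Drivers that embed &struct drm_vram_mm in their own device structure can
 * call drm_vram_mm_init() and drm_vram_mm_cleanup() directly instead of
 * going through drm_vram_helper_alloc_mm(). A minimal sketch, with the
 * embedding structure and funcs table assumed for illustration:
 *
 *	struct drv_device {
 *		struct drm_device drm;
 *		struct drm_vram_mm vmm;
 *	};
 *
 *	ret = drm_vram_mm_init(&drv->vmm, &drv->drm, vram_base, vram_size,
 *			       &drv_vram_mm_funcs);
 *	if (ret)
 *		return ret;
 *
 * and later, during driver teardown:
 *
 *	drm_vram_mm_cleanup(&drv->vmm);
 */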

/**
 * drm_vram_mm_mmap() - Helper for implementing &struct file_operations.mmap()
 * @filp:	the mapping's file structure
 * @vma:	the mapping's memory area
 * @vmm:	the VRAM MM instance
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
		     struct drm_vram_mm *vmm)
{
	return ttm_bo_mmap(filp, vma, &vmm->bdev);
}
EXPORT_SYMBOL(drm_vram_mm_mmap);

/*
 * Helpers for integration with struct drm_device
 */

/**
 * drm_vram_helper_alloc_mm - Allocates a device's instance of \
	&struct drm_vram_mm
 * @dev:	the DRM device
 * @vram_base:	the base address of the video memory
 * @vram_size:	the size of the video memory in bytes
 * @funcs:	callback functions for buffer objects
 *
 * Returns:
 * The new instance of &struct drm_vram_mm on success, or
 * an ERR_PTR()-encoded errno code otherwise.
 */
struct drm_vram_mm *drm_vram_helper_alloc_mm(
	struct drm_device *dev, uint64_t vram_base, size_t vram_size,
	const struct drm_vram_mm_funcs *funcs)
{
	int ret;

	if (WARN_ON(dev->vram_mm))
		return dev->vram_mm;

	dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL);
	if (!dev->vram_mm)
		return ERR_PTR(-ENOMEM);

	ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size, funcs);
	if (ret)
		goto err_kfree;

	return dev->vram_mm;

err_kfree:
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_vram_helper_alloc_mm);

/**
 * drm_vram_helper_release_mm - Releases a device's instance of \
	&struct drm_vram_mm
 * @dev:	the DRM device
 */
void drm_vram_helper_release_mm(struct drm_device *dev)
{
	if (!dev->vram_mm)
		return;

	drm_vram_mm_cleanup(dev->vram_mm);
	kfree(dev->vram_mm);
	dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);

/*
 * Helpers for &struct file_operations
 */

/**
 * drm_vram_mm_file_operations_mmap() - \
	Implements &struct file_operations.mmap()
 * @filp:	the mapping's file structure
 * @vma:	the mapping's memory area
 *
 * Returns:
 * 0 on success, or
 * a negative error code otherwise.
 */
int drm_vram_mm_file_operations_mmap(
	struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
		return -EINVAL;

	return drm_vram_mm_mmap(filp, vma, dev->vram_mm);
}
EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap);
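
/*
 * A minimal sketch of wiring this helper into a driver's file operations;
 * the fops name ("drv_fops") is hypothetical, and the other handlers shown
 * are the usual DRM defaults, listed here only for illustration:
 *
 *	static const struct file_operations drv_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.compat_ioctl	= drm_compat_ioctl,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *		.mmap		= drm_vram_mm_file_operations_mmap,
 *	};
 */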