// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */
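
/*
 * Illustrative sketch (not copied from any particular driver; the "example_"
 * names are placeholders): a driver that relies entirely on these helpers can
 * hook them up through the DEFINE_DRM_GEM_CMA_FOPS() and
 * DRM_GEM_CMA_DRIVER_OPS macros, which wire up &drm_driver.dumb_create and
 * the PRIME import/export helpers implemented in this file:
 *
 *	DEFINE_DRM_GEM_CMA_FOPS(example_fops);
 *
 *	static const struct drm_driver example_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.fops = &example_fops,
 *		DRM_GEM_CMA_DRIVER_OPS,
 *		.name = "example",
 *		.desc = "Example CMA-based driver",
 *		.date = "20120101",
 *		.major = 1,
 *		.minor = 0,
 *	};
 */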

static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
	.free = drm_gem_cma_free_object,
	.print_info = drm_gem_cma_print_info,
	.get_sg_table = drm_gem_cma_get_sg_table,
	.vmap = drm_gem_cma_vmap,
	.mmap = drm_gem_cma_mmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};

/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 * @private: true if used for internal purposes
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size, bool private)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret = 0;

	if (drm->driver->gem_create_object)
		gem_obj = drm->driver->gem_create_object(drm, size);
	else
		gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!gem_obj)
		return ERR_PTR(-ENOMEM);

	if (!gem_obj->funcs)
		gem_obj->funcs = &drm_gem_cma_default_funcs;

	cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

	if (private) {
		drm_gem_private_object_init(drm, gem_obj, size);

		/* Always use writecombine for dma-buf mappings */
		cma_obj->map_noncoherent = false;
	} else {
		ret = drm_gem_object_init(drm, gem_obj, size);
	}
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size, false);
	if (IS_ERR(cma_obj))
		return cma_obj;

	if (cma_obj->map_noncoherent) {
		cma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
						       &cma_obj->paddr,
						       DMA_TO_DEVICE,
						       GFP_KERNEL | __GFP_NOWARN);
	} else {
		cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
					      GFP_KERNEL | __GFP_NOWARN);
	}
	if (!cma_obj->vaddr) {
		drm_dbg(drm, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_object_put(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
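
/*
 * Illustrative sketch: a driver can allocate a contiguous buffer for internal
 * use (for example a scanout or cursor buffer) directly with
 * drm_gem_cma_create():
 *
 *	struct drm_gem_cma_object *cma_obj;
 *
 *	cma_obj = drm_gem_cma_create(drm, size);
 *	if (IS_ERR(cma_obj))
 *		return PTR_ERR(cma_obj);
 *
 * The DMA address to program into the hardware is then cma_obj->paddr and the
 * kernel mapping is cma_obj->vaddr. When the buffer is no longer needed, the
 * reference is dropped with:
 *
 *	drm_gem_object_put(&cma_obj->base);
 */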

/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered; the
	 * handle returned to userspace is that ID.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return cma_obj;
}

/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 * Drivers using the CMA helpers should set this as their
 * &drm_gem_object_funcs.free callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);

	if (gem_obj->import_attach) {
		if (cma_obj->vaddr)
			dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	} else if (cma_obj->vaddr) {
		/* Free with the same attributes the buffer was allocated with. */
		if (cma_obj->map_noncoherent)
			dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
					     cma_obj->vaddr, cma_obj->paddr,
					     DMA_TO_DEVICE);
		else
			dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
				    cma_obj->vaddr, cma_obj->paddr);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);

/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as a driver's &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_cma_object *cma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
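
/*
 * Illustrative sketch (the "example_" name and the 64-byte pitch alignment
 * are made up for the example): hardware with stricter requirements can
 * adjust the arguments before calling this helper from its own
 * &drm_driver.dumb_create implementation:
 *
 *	static int example_dumb_create(struct drm_file *file_priv,
 *				       struct drm_device *drm,
 *				       struct drm_mode_create_dumb *args)
 *	{
 *		args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);
 *		args->size = args->pitch * args->height;
 *
 *		return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 *	}
 */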

/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

#ifndef CONFIG_MMU
/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used in noMMU platforms to propose address mapping
 * for a given buffer.
 * It's intended to be used as a direct handler for the struct
 * &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put(obj);

	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
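
/*
 * Illustrative sketch (the "example_fops" name is a placeholder): on !MMU
 * platforms a driver wires this helper into its &file_operations, either
 * indirectly via DEFINE_DRM_GEM_CMA_FOPS(), which is expected to add the
 * entry only when CONFIG_MMU is disabled, or by hand:
 *
 *	static const struct file_operations example_fops = {
 *		.owner			= THIS_MODULE,
 *		.open			= drm_open,
 *		.release		= drm_release,
 *		.unlocked_ioctl		= drm_ioctl,
 *		.mmap			= drm_gem_mmap,
 *		.get_unmapped_area	= drm_gem_cma_get_unmapped_area,
 *	};
 */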

/**
 * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This function can be used as the &drm_gem_object_funcs.print_info callback.
 * It prints paddr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
			    const struct drm_gem_object *obj)
{
	const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
	drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_cma_print_info);

/**
 * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table by
 * calling the standard DMA mapping API. Drivers using the CMA helpers should
 * set this as their &drm_gem_object_funcs.get_sg_table callback.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size, true);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

/**
 * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @obj: GEM object
 * @map: Returns the kernel virtual address of the CMA GEM object's backing
 *       store.
 *
 * This function maps a buffer into the kernel's
 * virtual address space. Since the CMA buffers are already mapped into the
 * kernel virtual address space this simply returns the cached virtual
 * address. Drivers using the CMA helpers should set this as their DRM
 * driver's &drm_gem_object_funcs.vmap callback.
 *
 * Returns:
 * 0 on success, or a negative error code otherwise.
 */
int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	dma_buf_map_set_vaddr(map, cma_obj->vaddr);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);

/**
 * drm_gem_cma_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer into a userspace process's address space.
 * In addition to the usual GEM VMA setup it immediately faults in the entire
 * object instead of using on-demand faulting. Drivers that use the CMA
 * helpers should set this as their &drm_gem_object_funcs.mmap callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
	vma->vm_flags &= ~VM_PFNMAP;

	cma_obj = to_drm_gem_cma_obj(obj);

	if (cma_obj->map_noncoherent) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

		ret = dma_mmap_pages(cma_obj->base.dev->dev,
				     vma, vma->vm_end - vma->vm_start,
				     virt_to_page(cma_obj->vaddr));
	} else {
		ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
				  cma_obj->paddr, vma->vm_end - vma->vm_start);
	}
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

/**
 * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
 *	scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
 * virtual address. This ensures that a CMA GEM object always has its virtual
 * address set. This address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback. The &DRM_GEM_CMA_DRIVER_OPS_VMAP macro provides a shortcut to set
 * the necessary DRM driver operations.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
				       struct dma_buf_attachment *attach,
				       struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj;
	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(attach->dmabuf, &map);
	if (ret) {
		DRM_ERROR("Failed to vmap PRIME buffer\n");
		return ERR_PTR(ret);
	}

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		dma_buf_vunmap(attach->dmabuf, &map);
		return obj;
	}

	cma_obj = to_drm_gem_cma_obj(obj);
	cma_obj->vaddr = map.vaddr;

	return obj;
}
EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
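
/*
 * Illustrative sketch: compared to the DRM_GEM_CMA_DRIVER_OPS example near
 * the top of this file, a driver that needs the kernel mapping of imported
 * buffers only has to pick the _VMAP variant of the macro in its
 * &drm_driver (the "example" names remain placeholders):
 *
 *	static const struct drm_driver example_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.fops = &example_fops,
 *		DRM_GEM_CMA_DRIVER_OPS_VMAP,
 *		.name = "example",
 *	};
 */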