// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>

#include <drm/drm_prime.h>

#include "omap_drv.h"

/* -----------------------------------------------------------------------------
 * DMABUF Export
 */

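/*
 * Pin the buffer and hand the importer a single-entry scatterlist
 * describing it.  omapdrm only exports physically contiguous buffers
 * through this path.
 */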
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t dma_addr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* camera, etc, need physically contiguous.. but we need a
	 * better way to know this..
	 */
	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		goto out_free;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out_unpin;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
	sg_dma_address(sg->sgl) = dma_addr;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	return sg;

out_unpin:
	omap_gem_unpin(obj);
out_free:
	kfree(sg);
	return ERR_PTR(ret);
}

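/*
 * Undo omap_gem_map_dma_buf(): release the pin and free the
 * scatterlist.
 */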
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
		struct sg_table *sg, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;

	omap_gem_unpin(obj);
	sg_free_table(sg);
	kfree(sg);
}

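/*
 * Acquire the backing pages before the importer's CPU access.  Tiled
 * buffers would need a pinned de-tiled view first, which is not
 * implemented, so they are rejected.
 */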
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_flags(obj) & OMAP_BO_TILED) {
		/* TODO we would need to pin at least part of the buffer to
		 * get a de-tiled view.  For now just reject it.
		 */
		return -ENOMEM;
	}

	/* make sure we have the pages: */
	return omap_gem_get_pages(obj, &pages, true);
}

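/*
 * Drop the page reference taken in omap_gem_dmabuf_begin_cpu_access().
 */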
static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *obj = buffer->priv;

	omap_gem_put_pages(obj);
	return 0;
}

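/*
 * Sync a single page for CPU access and map it into kernel address
 * space.
 */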
static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
		unsigned long page_num)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_get_pages(obj, &pages, false))
		return NULL;

	omap_gem_cpu_sync_page(obj, page_num);
	return kmap(pages[page_num]);
}

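/*
 * Release a kernel mapping created by omap_gem_dmabuf_kmap().
 */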
static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
		unsigned long page_num, void *addr)
{
	struct drm_gem_object *obj = buffer->priv;
	struct page **pages;

	if (omap_gem_get_pages(obj, &pages, false))
		return;

	kunmap(pages[page_num]);
}

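/*
 * Map the buffer into userspace through the importer's dma-buf file
 * descriptor, reusing the normal GEM mmap path.
 */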
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret;

	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}

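/*
 * These ops are handed to the dma-buf core at export time through
 * drm_gem_dmabuf_export().  The export/import entry points themselves
 * are reached through the PRIME hooks of struct drm_driver; a rough
 * sketch of that wiring, for illustration only (the actual table lives
 * in omap_drv.c):
 *
 *	static struct drm_driver omap_drm_driver = {
 *		...
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export = omap_gem_prime_export,
 *		.gem_prime_import = omap_gem_prime_import,
 *		...
 *	};
 */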
static const struct dma_buf_ops omap_dmabuf_ops = {
	.map_dma_buf = omap_gem_map_dma_buf,
	.unmap_dma_buf = omap_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
	.map = omap_gem_dmabuf_kmap,
	.unmap = omap_gem_dmabuf_kunmap,
	.mmap = omap_gem_dmabuf_mmap,
};

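/*
 * Export a GEM object as a dma-buf so that other devices and drivers
 * can import it.
 */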
struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

/* -----------------------------------------------------------------------------
 * DMABUF Import
 */

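/*
 * Import a dma-buf as a GEM object.  Buffers that were exported by this
 * same device are short-circuited to the underlying GEM object instead
 * of being wrapped a second time.
 */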
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}