/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include <drm/drmP.h>

#include "i915_drv.h"

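/* Recover our GEM object from the dma-buf's private data. */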
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

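/*
 * Pin the object's backing pages and hand the importer an independent
 * copy of our scatterlist, mapped for DMA on the importing device.
 */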
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

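/* Undo i915_gem_map_dma_buf(): unmap the copied table, free it and unpin. */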
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

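/* Return a kernel virtual mapping of the whole object, write-back cached. */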
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_unpin_map(obj);
}

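/*
 * Map a single page of the object into the kernel's address space. Only
 * objects backed by struct pages can be kmapped, and the page index must
 * lie within the object.
 */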
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct page *page;

	if (page_num >= obj->base.size >> PAGE_SHIFT)
		return NULL;

	if (!i915_gem_object_has_struct_page(obj))
		return NULL;

	if (i915_gem_object_pin_pages(obj))
		return NULL;

	/* Synchronisation is left to the caller (via .begin_cpu_access()) */
	page = i915_gem_object_get_page(obj, page_num);
	if (IS_ERR(page))
		goto err_unpin;

	return kmap(page);

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	kunmap(virt_to_page(addr));
	i915_gem_object_unpin_pages(obj);
}

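/*
 * mmap the dma-buf by forwarding to the shmem file backing the object,
 * swapping the vma's file reference over to that file on success.
 */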
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

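/* Move the object into the CPU domain before the importer accesses it. */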
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

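/* Flush CPU access by moving the object back to the GTT domain. */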
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = i915_gem_dmabuf_kmap,
	.unmap = i915_gem_dmabuf_kunmap,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

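/*
 * Export one of our GEM objects as a dma-buf, sharing its reservation
 * object so that fences remain visible to the importer.
 */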
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(dev, &exp_info);
}

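/* Acquire the backing store of an imported object by mapping its attachment. */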
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

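/*
 * Import a dma-buf as a GEM object. If the dma-buf was exported by us on
 * the same device, just take a reference on the underlying object instead
 * of creating a new attachment.
 */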
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(to_i915(dev));
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;
	obj->resv = dma_buf->resv;

	/*
	 * We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif