/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>
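
/*
 * Per-buffer state for this allocator: @vaddr is the kernel mapping of
 * the buffer (a vmalloc area, a vm_map_ram() area, an ioremap or a
 * dma-buf vmap, depending on how the buffer was created), @vec tracks
 * the pinned pages of a USERPTR buffer, and @handler lets the common
 * videobuf2 vm_ops refcount mmap()ed buffers.
 */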
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);
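
/*
 * MMAP mode: the buffer is allocated with vmalloc_user(), which returns
 * page-aligned, zeroed memory suitable for mapping to userspace with
 * remap_vmalloc_range().
 */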
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	refcount_set(&buf->refcount, 1);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}
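
/*
 * USERPTR mode: pin the user pages behind @vaddr with a frame vector.
 * For regular VMAs the pages are mapped into the kernel with
 * vm_map_ram(); for PFN-mapped VMAs (no struct page available) the
 * memory must be physically contiguous and is accessed via ioremap
 * instead.
 */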
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	return refcount_read(&buf->refcount);
}

static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};
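
/*
 * Attaching builds a scatterlist for the exported buffer, one entry per
 * page of the vmalloc area looked up with vmalloc_to_page(). The table
 * is only DMA-mapped later, in the map_dma_buf callback below.
 */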
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
					 struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
					  struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}
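
/*
 * Map the attachment's cached scatterlist. The mapping is kept across
 * calls and only redone when the requested DMA direction changes; the
 * dma-buf mutex serializes concurrent map/unmap operations.
 */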
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			     attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				       struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.map = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};
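
/*
 * Export a buffer as a dma-buf. The dma-buf takes its own reference on
 * the vb2 buffer, which is dropped again in the release callback above.
 */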
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
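
/*
 * Importer side: attach_dmabuf only records the dma-buf and the
 * requested size/direction; the actual kernel mapping is created on
 * demand with dma_buf_vmap() in map_dmabuf and torn down again in
 * unmap_dmabuf.
 */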
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}


const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
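
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * selects this allocator by pointing its vb2_queue at these ops before
 * queue initialization, e.g. in a hypothetical probe function:
 *
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 *
 * This suits devices that do not DMA directly into the buffers and
 * only need a kernel virtual mapping of them.
 */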

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");