/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

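/*
 * struct vb2_vmalloc_buf - bookkeeping for one buffer plane
 * @vaddr:	kernel virtual address of the plane: a vmalloc area (MMAP),
 *		a vm_map_ram()/ioremap() mapping of user memory (USERPTR),
 *		or a dma-buf vmap (DMABUF)
 * @vec:	pinned user pages, used only for USERPTR buffers
 * @dma_dir:	DMA direction inherited from the owning queue
 * @size:	size of the plane in bytes
 * @refcount:	users of the buffer: the allocation itself, userspace
 *		mmap()ings and exported dma-bufs
 * @handler:	common videobuf2 vm_operations helper that drops @refcount
 *		when a userspace mapping goes away
 * @dbuf:	the imported dma-buf, used only for DMABUF buffers
 */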
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction	dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

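/*
 * MMAP mode: allocate @size bytes with vmalloc_user(), which returns zeroed
 * memory that can later be mapped to userspace with remap_vmalloc_range().
 */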
static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
			       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}

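/* Drop one reference; free the vmalloc area once the last user is gone. */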
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

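/*
 * USERPTR mode: pin the user pages backing [vaddr, vaddr + size) and map
 * them into the kernel. Ranges without struct pages (e.g. VM_PFNMAP
 * mappings) are accepted only when physically contiguous, in which case
 * the range is ioremap()ed instead.
 */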
static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				     unsigned long vaddr, unsigned long size)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = vb->vb2_queue->dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

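/*
 * Undo vb2_vmalloc_get_userptr(): tear down the kernel mapping, mark the
 * pages dirty if the device may have written to them, and unpin them.
 */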
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

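/* Return the kernel virtual address of the plane, if it has one. */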
static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

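/* Report how many users (mappings, dma-bufs) still reference the buffer. */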
static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	return refcount_read(&buf->refcount);
}

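/*
 * Map an MMAP buffer into a userspace VMA; the common vm_operations take a
 * reference so the buffer stays alive for as long as it is mapped.
 */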
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

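/*
 * Build a scatterlist covering the vmalloc pages of the buffer and cache it
 * in the attachment; the actual DMA mapping is deferred until
 * vb2_vmalloc_dmabuf_ops_map() is called.
 */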
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sgtable_sg(sgt, sg, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

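/* Release the cached scatterlist, unmapping it first if it is still mapped. */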
static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

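/*
 * Map the cached scatterlist for DMA in the requested direction. An existing
 * mapping in the same direction is reused; a stale one is unmapped first.
 */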
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

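/* The buffer already has a permanent kernel mapping, just hand it out. */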
static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
				       struct iosys_map *map)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

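/*
 * Export an MMAP buffer as a dma-buf. The dma-buf takes its own reference
 * on the buffer, dropped again in vb2_vmalloc_dmabuf_ops_release().
 */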
static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
					      void *buf_priv,
					      unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

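/*
 * DMABUF mode (importer side): create a kernel mapping of the attached
 * dma-buf so that vb2_vmalloc_vaddr() has an address to hand out.
 */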
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap(buf->dbuf, &map);
	if (ret)
		return -EFAULT;
	buf->vaddr = map.vaddr;

	return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	dma_buf_vunmap(buf->dbuf, &map);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, &map);

	kfree(buf);
}

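/*
 * DMABUF mode: record the dma-buf to import; the kernel mapping is created
 * lazily in vb2_vmalloc_map_dmabuf().
 */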
static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
				       struct device *dev,
				       struct dma_buf *dbuf,
				       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;

	return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc = vb2_vmalloc_alloc,
	.put = vb2_vmalloc_put,
	.get_userptr = vb2_vmalloc_get_userptr,
	.put_userptr = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf = vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf = vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf = vb2_vmalloc_detach_dmabuf,
	.vaddr = vb2_vmalloc_vaddr,
	.mmap = vb2_vmalloc_mmap,
	.num_users = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
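/*
 * A minimal usage sketch (illustrative, not part of this file): a driver
 * whose hardware does no DMA and touches buffers only with the CPU selects
 * this allocator by pointing its vb2_queue at vb2_vmalloc_memops before
 * calling vb2_queue_init(); "ctx" here is a hypothetical driver context:
 *
 *	struct vb2_queue *q = &ctx->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 *
 * videobuf2 then calls back into the ops above to allocate, map and export
 * the buffers.
 */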

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);