// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	buffer->dev = dev;
	buffer->size = len;

	ret = heap->ops->allocate(heap, buffer, len, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	kfree(buffer);
}

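/*
 * Drop the buffer from the device's rbtree and either queue it on the
 * heap's deferred-free list or free it immediately, depending on the
 * heap's ION_HEAP_FLAG_DEFER_FREE flag.
 */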
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

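/*
 * Map the buffer into the kernel, caching the vaddr and reference
 * counting the mapping so nested callers share a single map_kernel()
 * call; ion_buffer_kmap_put() undoes it when the count drops to zero.
 */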
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

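/*
 * Give each dma-buf attachment its own copy of the buffer's sg_table so
 * every attached device can hold its own DMA mapping (dma_address) of
 * the same underlying pages.
 */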
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

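/*
 * Per-attachment state: the attaching device, its private copy of the
 * buffer's sg_table, and its link in the buffer's attachment list used
 * for CPU-access cache maintenance.
 */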
struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

static int ion_dma_buf_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void ion_dma_buf_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	free_duped_table(a->table);
	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	kfree(a);
}

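/* DMA-map the attachment's private sg_table for the attaching device. */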
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

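/*
 * Map the buffer into a userspace VMA via the heap's map_user() hook,
 * using write-combined page protection for uncached buffers.
 */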
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	_ion_buffer_destroy(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

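/*
 * Prepare the buffer for CPU access: take a kernel mapping when the heap
 * provides one, then sync every attachment's sg_table back to the CPU.
 */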
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;
	int ret = 0;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		if (IS_ERR(vaddr)) {
			ret = PTR_ERR(vaddr);
			goto unlock;
		}
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}

unlock:
	mutex_unlock(&buffer->lock);
	return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detach,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.map = ion_dma_buf_kmap,
	.unmap = ion_dma_buf_kunmap,
};

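/*
 * Allocate a buffer from the highest-priority heap in heap_id_mask,
 * wrap it in a dma-buf and return a new file descriptor for it (or a
 * negative errno on failure).
 */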
int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
	struct ion_device *dev = internal_dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client and matches the
	 * request of the caller, allocate from it.  Repeat until the
	 * allocation has succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return -EINVAL;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return -ENODEV;

	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		_ion_buffer_destroy(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

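/*
 * Report the registered heaps to userspace: with a NULL buffer only the
 * heap count is returned; otherwise up to query->cnt ion_heap_data
 * entries are copied out.
 */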
int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ion_ioctl,
#endif
};

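/*
 * debugfs hooks for the per-heap "<name>_shrink" file: writing N asks
 * the heap's shrinker to scan N objects (0 means everything it currently
 * reports), reading returns the shrinker's current object count.
 */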
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

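/*
 * Register a heap with the global ion device: set up deferred freeing
 * and the shrinker when requested, assign a heap id and insert the heap
 * into the priority-ordered heap list.
 */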
void ion_device_add_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;
	int ret;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret)
			pr_err("%s: Failed to register shrinker\n", __func__);
	}

	heap->dev = dev;
	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name, 0644, dev->debug_root,
				    heap, &debug_shrink_fops);
	}

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

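/*
 * Create the single, global ion misc device (/dev/ion) and initialise
 * its buffer rbtree, heap list and debugfs root.
 */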
static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);