// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include "ion.h"

static struct ion_device *internal_dev;
static int heap_id;

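/*
 * ion_buffer_create() backs every allocation: it asks a single heap to
 * allocate @len bytes with @flags and, if the heap uses deferred freeing,
 * retries once after draining the heap's freelist so pending frees can
 * satisfy the request.  On success it updates the heap's statistics
 * (buffer count, allocated bytes, high-water mark) and returns the new
 * ion_buffer; on failure it returns an ERR_PTR value.
 */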
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	buffer->dev = dev;
	buffer->size = len;

	ret = heap->ops->allocate(heap, buffer, len, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, flags);
		if (ret)
			goto err2;
	}

	if (!buffer->sg_table) {
		WARN_ONCE(1, "This heap needs to set the sgtable");
		ret = -EINVAL;
		goto err1;
	}

	spin_lock(&heap->stat_lock);
	heap->num_of_buffers++;
	heap->num_of_alloc_bytes += len;
	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
	spin_unlock(&heap->stat_lock);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (buffer->kmap_cnt > 0) {
		pr_warn_once("%s: buffer still mapped in the kernel\n",
			     __func__);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	}
	buffer->heap->ops->free(buffer);
	spin_lock(&buffer->heap->stat_lock);
	buffer->heap->num_of_buffers--;
	buffer->heap->num_of_alloc_bytes -= buffer->size;
	spin_unlock(&buffer->heap->stat_lock);

	kfree(buffer);
}

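/*
 * Entry point used on the dma-buf release path: buffers owned by heaps with
 * ION_HEAP_FLAG_DEFER_FREE are queued on the heap's freelist and freed later
 * by the heap's free thread; everything else is torn down immediately via
 * ion_buffer_destroy().
 */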
static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

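/*
 * Kernel-mapping helpers.  ion_buffer_kmap_get()/ion_buffer_kmap_put()
 * reference-count the heap's map_kernel()/unmap_kernel() mapping so that
 * repeated begin_cpu_access calls share one vaddr; callers in this file
 * hold buffer->lock around them.
 */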
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(!vaddr,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

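/*
 * Each dma-buf attachment gets its own copy of the buffer's scatterlist so
 * that per-device DMA addresses never alias.  The copy shares the underlying
 * pages with the original table; only the sg entries themselves are
 * duplicated, with their dma_address fields cleared until the attachment is
 * actually mapped.
 */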
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

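/*
 * Per-attachment state: the importing device, its private copy of the
 * buffer's sg_table, and a node on buffer->attachments (protected by
 * buffer->lock) so that cache maintenance in begin/end_cpu_access can walk
 * all attached devices.
 */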
struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

static int ion_dma_buf_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void ion_dma_buf_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);
	free_duped_table(a->table);

	kfree(a);
}

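/*
 * map/unmap callbacks for the exported dma-buf: they map the attachment's
 * private sg_table copy for DMA on the importing device and unmap it again.
 * A dma_map_sg() failure is reported to the importer as -ENOMEM.
 */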
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

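/*
 * mmap of the dma-buf fd is delegated to the heap's map_user() hook.
 * Buffers allocated without ION_FLAG_CACHED are mapped write-combined
 * instead of cached.
 */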
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	_ion_buffer_destroy(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

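/*
 * CPU access bracketing: begin_cpu_access pins a kernel mapping (when the
 * heap provides map_kernel) and syncs every attachment's sg_table for the
 * CPU; end_cpu_access drops the mapping and hands ownership back to the
 * devices with dma_sync_sg_for_device().
 */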
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;
	struct ion_dma_buf_attachment *a;
	int ret = 0;

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		if (IS_ERR(vaddr)) {
			ret = PTR_ERR(vaddr);
			goto unlock;
		}
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}

unlock:
	mutex_unlock(&buffer->lock);
	return ret;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_dma_buf_attachment *a;

	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
	}

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

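/*
 * Every buffer handed to userspace is a plain dma-buf fd; all sharing,
 * mapping and cache maintenance goes through these dma_buf_ops rather than
 * through ION-specific ioctls.
 */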
static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detach,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.map = ion_dma_buf_kmap,
	.unmap = ion_dma_buf_kunmap,
};

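/*
 * ion_alloc() walks the registered heaps in priority order, asks the first
 * heap whose id is set in @heap_id_mask to create a buffer, exports it as a
 * dma-buf and returns the new fd (or a negative errno).
 *
 * Illustrative userspace usage only (not part of this file), assuming the
 * ION_IOC_ALLOC and struct ion_allocation_data definitions from the
 * matching uapi ion.h header:
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.heap_id_mask = 1 << some_heap_id,
 *		.flags = 0,
 *	};
 *	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) == 0)
 *		dmabuf_fd = alloc.fd;	// share or mmap() this fd
 */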
static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
	struct ion_device *dev = internal_dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	int fd;
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);
	/*
	 * Traverse the list of heaps available in this system in priority
	 * order.  If a heap's id is set in the caller's mask, try to
	 * allocate from it; repeat until an allocation succeeds or all
	 * heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return -EINVAL;

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (!buffer)
		return -ENODEV;

	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		_ion_buffer_destroy(buffer);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}

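/*
 * ION_IOC_HEAP_QUERY handler.  Userspace typically calls it twice: first
 * with query->heaps == 0 so only the number of heaps is reported in
 * query->cnt, then again with query->heaps pointing at an array of
 * struct ion_heap_data sized from that count, which is filled with each
 * heap's name, type and id.
 */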
static int ion_query_heaps(struct ion_heap_query *query)
{
	struct ion_device *dev = internal_dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

union ion_ioctl_arg {
	struct ion_allocation_data allocation;
	struct ion_heap_query query;
};

static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
	switch (cmd) {
	case ION_IOC_HEAP_QUERY:
		if (arg->query.reserved0 ||
		    arg->query.reserved1 ||
		    arg->query.reserved2)
			return -EINVAL;
		break;
	default:
		break;
	}

	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	union ion_ioctl_arg data;

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	/*
	 * The copy_from_user is done unconditionally, for both read and
	 * write ioctls, so that the arguments can be validated.  If the
	 * ioctl does not pass data in, the buffer is cleared below before
	 * use.
	 */
	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	ret = validate_ioctl_arg(cmd, &data);
	if (ret) {
		pr_warn_once("%s: ioctl validate failed\n", __func__);
		return ret;
	}

	if (!(_IOC_DIR(cmd) & _IOC_WRITE))
		memset(&data, 0, sizeof(data));

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		int fd;

		fd = ion_alloc(data.allocation.len,
			       data.allocation.heap_id_mask,
			       data.allocation.flags);
		if (fd < 0)
			return fd;

		data.allocation.fd = fd;

		break;
	}
	case ION_IOC_HEAP_QUERY:
		ret = ion_query_heaps(&data.query);
		break;
	default:
		return -ENOTTY;
	}

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
			return -EFAULT;
	}
	return ret;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ion_ioctl,
#endif
};

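/*
 * Per-heap "<name>_shrink" debugfs file.  Reading it reports how many
 * objects the heap's shrinker currently counts; writing N scans up to N
 * objects, and writing 0 asks the shrinker to scan everything it counted,
 * which effectively drains the heap's cached/deferred memory.
 */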
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

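/*
 * Heap drivers call ion_device_add_heap() to register with the core.  The
 * heap gets its deferred-free thread and shrinker set up as requested by
 * its flags and ops, a debugfs directory with allocation statistics, and a
 * node on dev->heaps keyed by -heap->id so that heaps with higher ids are
 * tried first by ion_alloc().
 */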
void ion_device_add_heap(struct ion_heap *heap)
{
	struct ion_device *dev = internal_dev;
	int ret;
	struct dentry *heap_root;
	char debug_name[64];

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: cannot add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	spin_lock_init(&heap->stat_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
		ret = ion_heap_init_shrinker(heap);
		if (ret)
			pr_err("%s: Failed to register shrinker\n", __func__);
	}

	heap->dev = dev;
	heap->num_of_buffers = 0;
	heap->num_of_alloc_bytes = 0;
	heap->alloc_bytes_wm = 0;

	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
	debugfs_create_u64("num_of_buffers",
			   0444, heap_root,
			   &heap->num_of_buffers);
	debugfs_create_u64("num_of_alloc_bytes",
			   0444,
			   heap_root,
			   &heap->num_of_alloc_bytes);
	debugfs_create_u64("alloc_bytes_wm",
			   0444,
			   heap_root,
			   &heap->alloc_bytes_wm);

	if (heap->shrinker.count_objects &&
	    heap->shrinker.scan_objects) {
		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debugfs_create_file(debug_name,
				    0644,
				    heap_root,
				    heap,
				    &debug_shrink_fops);
	}

	down_write(&dev->lock);
	heap->id = heap_id++;
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

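/*
 * Create the single, global ION device at subsys_initcall time: register
 * the "ion" misc character device that exposes the ioctl interface, create
 * the "ion" debugfs directory, and initialise the heap list that
 * ion_device_add_heap() populates later.
 */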
static int ion_device_create(void)
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return -ENOMEM;

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ret;
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	internal_dev = idev;
	return 0;
}
subsys_initcall(ion_device_create);