// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator generic heap helpers
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#include "ion.h"

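/*
 * Map a buffer into the kernel's address space: collect every page backing
 * the buffer's sg_table into a temporary array and vmap() it, using a cached
 * mapping for ION_FLAG_CACHED buffers and a write-combined one otherwise.
 */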
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(array_size(npages,
						 sizeof(struct page *)));
	struct page **tmp = pages;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

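/* Tear down a kernel mapping previously set up by ion_heap_map_kernel(). */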
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

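/*
 * Map a buffer into a userspace VMA. vm_pgoff is interpreted as a page
 * offset into the buffer; each scatterlist entry is remapped with
 * remap_pfn_range() until the whole VMA has been covered.
 */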
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}

	return 0;
}

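/* Temporarily map @num pages with vm_map_ram() and zero them. */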
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);

	return 0;
}

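/*
 * Zero every page referenced by a scatterlist, mapping and clearing the
 * pages in batches of 32 to keep transient mapping usage small.
 */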
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
				pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sg_page(sgl, &piter, nents, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_heap_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_heap_clear_pages(pages, p, pgprot);

	return ret;
}

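/*
 * Zero all memory backing @buffer, using the same cacheability the buffer
 * is normally mapped with (cached or write-combined).
 */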
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

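/* Zero a single physically contiguous run of pages of @size bytes. */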
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}

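/*
 * Queue a buffer on the heap's deferred-free list and wake the free thread;
 * the buffer is destroyed later by ion_heap_deferred_free().
 */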
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	spin_lock(&heap->free_lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	spin_lock(&heap->free_lock);
	size = heap->free_list_size;
	spin_unlock(&heap->free_lock);

	return size;
}

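/*
 * Drain up to @size bytes of buffers from the deferred-free list (all of
 * it if @size is 0). When @skip_pools is true, each buffer is flagged with
 * ION_PRIV_FLAG_SHRINKER_FREE so the heap frees it directly rather than
 * returning pages to a page pool. free_lock is dropped around each
 * ion_buffer_destroy() call.
 */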
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
				       bool skip_pools)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		if (skip_pools)
			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
		total_drained += buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}

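/* Drain up to @size bytes from the free list via the normal free path. */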
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, false);
}

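/*
 * Like ion_heap_freelist_drain(), but marks each buffer with
 * ION_PRIV_FLAG_SHRINKER_FREE so heap page pools are bypassed; called from
 * the shrinker path.
 */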
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, true);
}

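/*
 * Body of the per-heap deferred-free thread: sleep (freezably) until the
 * free list is non-empty, then pop and destroy buffers one at a time.
 */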
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		spin_lock(&heap->free_lock);
		if (list_empty(&heap->free_list)) {
			spin_unlock(&heap->free_lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

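/*
 * Set up deferred freeing for a heap: initialise the free list and wait
 * queue, then start the deferred-free kthread at SCHED_IDLE priority.
 */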
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR_OR_ZERO(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);

	return 0;
}

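/*
 * Shrinker "count" callback: report the number of reclaimable pages, i.e.
 * the deferred-free list plus whatever the heap-specific shrink op reports
 * when queried with a zero scan count.
 */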
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
					   struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int total = 0;

	total = ion_heap_freelist_size(heap) / PAGE_SIZE;

	if (heap->ops->shrink)
		total += heap->ops->shrink(heap, sc->gfp_mask, 0);

	return total;
}

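/*
 * Shrinker "scan" callback: reclaim up to sc->nr_to_scan pages, first from
 * the deferred-free list and then via the heap-specific shrink op.
 */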
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int freed = 0;
	int to_scan = sc->nr_to_scan;

	if (to_scan == 0)
		return 0;

	/*
	 * Shrink the free list first; there is no point in zeroing memory
	 * that is about to be reclaimed anyway. Also skip any page pooling.
	 */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
				PAGE_SIZE;

	to_scan -= freed;
	if (to_scan <= 0)
		return freed;

	if (heap->ops->shrink)
		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);

	return freed;
}

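/* Register the heap's shrinker so the core MM can reclaim from it. */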
int ion_heap_init_shrinker(struct ion_heap *heap)
{
	heap->shrinker.count_objects = ion_heap_shrink_count;
	heap->shrinker.scan_objects = ion_heap_shrink_scan;
	heap->shrinker.seeks = DEFAULT_SEEKS;
	heap->shrinker.batch = 0;

	return register_shrinker(&heap->shrinker);
}