1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Support for Medfield PNW Camera Imaging ISP subsystem.
4 *
5 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
6 *
7 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 *
19 */
20 /*
21 * This file contains functions for buffer object structure management
22 */
23 #include <linux/kernel.h>
24 #include <linux/types.h>
25 #include <linux/gfp.h> /* for GFP_ATOMIC */
26 #include <linux/mm.h>
27 #include <linux/mm_types.h>
28 #include <linux/hugetlb.h>
29 #include <linux/highmem.h>
30 #include <linux/slab.h> /* for kmalloc */
31 #include <linux/module.h>
32 #include <linux/moduleparam.h>
33 #include <linux/string.h>
34 #include <linux/list.h>
35 #include <linux/errno.h>
36 #include <linux/io.h>
37 #include <asm/current.h>
38 #include <linux/sched/signal.h>
39 #include <linux/file.h>
40
41 #include <asm/set_memory.h>
42
43 #include "atomisp_internal.h"
44 #include "hmm/hmm_common.h"
45 #include "hmm/hmm_pool.h"
46 #include "hmm/hmm_bo.h"
47
48 static unsigned int order_to_nr(unsigned int order)
49 {
50 return 1U << order;
51 }
52
53 static unsigned int nr_to_order_bottom(unsigned int nr)
54 {
55 return fls(nr) - 1;
56 }
57
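/*
 * For example (illustrative values only, not part of the driver): for a
 * hypothetical 12-page request,
 *
 *	nr_to_order_bottom(12) == fls(12) - 1 == 3	(2^3 = 8 <= 12)
 *	order_to_nr(3) == 1U << 3 == 8
 *
 * so the largest power-of-two block that fits is 8 pages, and the
 * remaining 4 pages are left for later iterations of the allocation
 * loop in alloc_private_pages().
 */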
58 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
59 unsigned int pgnr)
60 {
61 check_bodev_null_return(bdev, -EINVAL);
62 var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
63 "hmm_bo_device not inited yet.\n");
64 /* prevent zero size buffer object */
65 if (pgnr == 0) {
66 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
67 return -EINVAL;
68 }
69
70 memset(bo, 0, sizeof(*bo));
71 mutex_init(&bo->mutex);
72
73 /* init the bo->list HEAD as an element of entire_bo_list */
74 INIT_LIST_HEAD(&bo->list);
75
76 bo->bdev = bdev;
77 bo->vmap_addr = NULL;
78 bo->status = HMM_BO_FREE;
79 bo->start = bdev->start;
80 bo->pgnr = pgnr;
81 bo->end = bo->start + pgnr_to_size(pgnr);
82 bo->prev = NULL;
83 bo->next = NULL;
84
85 return 0;
86 }
87
88 static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
89 struct rb_node *node, unsigned int pgnr)
90 {
91 struct hmm_buffer_object *this, *ret_bo, *temp_bo;
92
93 this = rb_entry(node, struct hmm_buffer_object, node);
94 if (this->pgnr == pgnr ||
95 (this->pgnr > pgnr && !this->node.rb_left)) {
96 goto remove_bo_and_return;
97 } else {
98 if (this->pgnr < pgnr) {
99 if (!this->node.rb_right)
100 return NULL;
101 ret_bo = __bo_search_and_remove_from_free_rbtree(
102 this->node.rb_right, pgnr);
103 } else {
104 ret_bo = __bo_search_and_remove_from_free_rbtree(
105 this->node.rb_left, pgnr);
106 }
107 if (!ret_bo) {
108 if (this->pgnr > pgnr)
109 goto remove_bo_and_return;
110 else
111 return NULL;
112 }
113 return ret_bo;
114 }
115
116 remove_bo_and_return:
117 /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
118 * 1. check if 'this->next' is NULL:
119 * yes: erase 'this' node and rebalance rbtree, return 'this'.
120 */
121 if (!this->next) {
122 rb_erase(&this->node, &this->bdev->free_rbtree);
123 return this;
124 }
125 /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
126 * 2. check if 'this->next->next' is NULL:
127 * yes: change the related 'next/prev' pointer,
128 * return 'this->next' but the rbtree stays unchanged.
129 */
130 temp_bo = this->next;
131 this->next = temp_bo->next;
132 if (temp_bo->next)
133 temp_bo->next->prev = this;
134 temp_bo->next = NULL;
135 temp_bo->prev = NULL;
136 return temp_bo;
137 }
138
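/*
 * Illustrative layout (hypothetical sizes): the free rbtree is keyed by
 * pgnr, and bos of the same size hang off the rbtree node through their
 * 'next'/'prev' pointers:
 *
 *	          [pgnr=8] --next--> bo_a(8) --next--> bo_b(8)
 *	         /        \
 *	  [pgnr=4]        [pgnr=16]
 *
 * The search above walks towards a node large enough for the request and,
 * when that node carries a list, hands out a list member (bo_a) first, so
 * the rbtree itself only needs rebalancing once the list is empty.
 */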
139 static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
140 ia_css_ptr start)
141 {
142 struct rb_node *n = root->rb_node;
143 struct hmm_buffer_object *bo;
144
145 do {
146 bo = rb_entry(n, struct hmm_buffer_object, node);
147
148 if (bo->start > start) {
149 if (!n->rb_left)
150 return NULL;
151 n = n->rb_left;
152 } else if (bo->start < start) {
153 if (!n->rb_right)
154 return NULL;
155 n = n->rb_right;
156 } else {
157 return bo;
158 }
159 } while (n);
160
161 return NULL;
162 }
163
164 static struct hmm_buffer_object *__bo_search_by_addr_in_range(
165 struct rb_root *root, unsigned int start)
166 {
167 struct rb_node *n = root->rb_node;
168 struct hmm_buffer_object *bo;
169
170 do {
171 bo = rb_entry(n, struct hmm_buffer_object, node);
172
173 if (bo->start > start) {
174 if (!n->rb_left)
175 return NULL;
176 n = n->rb_left;
177 } else {
178 if (bo->end > start)
179 return bo;
180 if (!n->rb_right)
181 return NULL;
182 n = n->rb_right;
183 }
184 } while (n);
185
186 return NULL;
187 }
188
189 static void __bo_insert_to_free_rbtree(struct rb_root *root,
190 struct hmm_buffer_object *bo)
191 {
192 struct rb_node **new = &root->rb_node;
193 struct rb_node *parent = NULL;
194 struct hmm_buffer_object *this;
195 unsigned int pgnr = bo->pgnr;
196
197 while (*new) {
198 parent = *new;
199 this = container_of(*new, struct hmm_buffer_object, node);
200
201 if (pgnr < this->pgnr) {
202 new = &((*new)->rb_left);
203 } else if (pgnr > this->pgnr) {
204 new = &((*new)->rb_right);
205 } else {
206 bo->prev = this;
207 bo->next = this->next;
208 if (this->next)
209 this->next->prev = bo;
210 this->next = bo;
211 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
212 return;
213 }
214 }
215
216 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
217
218 rb_link_node(&bo->node, parent, new);
219 rb_insert_color(&bo->node, root);
220 }
221
222 static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
223 struct hmm_buffer_object *bo)
224 {
225 struct rb_node **new = &root->rb_node;
226 struct rb_node *parent = NULL;
227 struct hmm_buffer_object *this;
228 unsigned int start = bo->start;
229
230 while (*new) {
231 parent = *new;
232 this = container_of(*new, struct hmm_buffer_object, node);
233
234 if (start < this->start)
235 new = &((*new)->rb_left);
236 else
237 new = &((*new)->rb_right);
238 }
239
240 kref_init(&bo->kref);
241 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
242
243 rb_link_node(&bo->node, parent, new);
244 rb_insert_color(&bo->node, root);
245 }
246
247 static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
248 struct hmm_buffer_object *bo,
249 unsigned int pgnr)
250 {
251 struct hmm_buffer_object *new_bo;
252 unsigned long flags;
253 int ret;
254
255 new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
256 if (!new_bo) {
257 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
258 return NULL;
259 }
260 ret = __bo_init(bdev, new_bo, pgnr);
261 if (ret) {
262 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
263 kmem_cache_free(bdev->bo_cache, new_bo);
264 return NULL;
265 }
266
267 new_bo->start = bo->start;
268 new_bo->end = new_bo->start + pgnr_to_size(pgnr);
269 bo->start = new_bo->end;
270 bo->pgnr = bo->pgnr - pgnr;
271
272 spin_lock_irqsave(&bdev->list_lock, flags);
273 list_add_tail(&new_bo->list, &bo->list);
274 spin_unlock_irqrestore(&bdev->list_lock, flags);
275
276 return new_bo;
277 }
278
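/*
 * Illustrative split (hypothetical addresses, 4 KiB pages): breaking a
 * 16-page free bo starting at 0x1000 for a 4-page request yields
 *
 *	new_bo: 0x1000 - 0x5000  (4 pages, returned to the caller)
 *	bo:     0x5000 - 0x11000 (12 pages, put back on the free rbtree
 *	                          by the caller)
 *
 * new_bo is linked into entire_bo_list right before bo, so the list keeps
 * its address order and the two halves can be merged again on release.
 */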
279 static void __bo_take_off_handling(struct hmm_buffer_object *bo)
280 {
281 struct hmm_bo_device *bdev = bo->bdev;
282 /* There are 4 situations when we take off a known bo from the free rbtree:
283 * 1. if bo->prev == NULL && bo->next == NULL, bo is an rbtree node
284 * and does not have a linked list hanging off it; to take off this bo,
285 * we just erase it directly and rebalance the free rbtree
286 */
287 if (!bo->prev && !bo->next) {
288 rb_erase(&bo->node, &bdev->free_rbtree);
289 /* 2. when bo->next != NULL && bo->prev == NULL, bo is an rbtree node
290 * and has a linked list; to take off this bo we need to erase bo
291 * first, then insert bo->next into the free rbtree and rebalance
292 * the free rbtree
293 */
294 } else if (!bo->prev && bo->next) {
295 bo->next->prev = NULL;
296 rb_erase(&bo->node, &bdev->free_rbtree);
297 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
298 bo->next = NULL;
299 /* 3. when bo->prev != NULL && bo->next == NULL, bo is not an rbtree
300 * node; bo is the last element of the linked list after the rbtree
301 * node. To take off this bo, we just need to set the "prev/next"
302 * pointers to NULL; the free rbtree stays unchanged
303 */
304 } else if (bo->prev && !bo->next) {
305 bo->prev->next = NULL;
306 bo->prev = NULL;
307 /* 4. when bo->prev != NULL && bo->next != NULL, bo is not an rbtree
308 * node; bo is in the middle of the linked list after the rbtree node.
309 * To take off this bo, we just set the "prev/next" pointers
310 * to NULL; the free rbtree stays unchanged
311 */
312 } else if (bo->prev && bo->next) {
313 bo->next->prev = bo->prev;
314 bo->prev->next = bo->next;
315 bo->next = NULL;
316 bo->prev = NULL;
317 }
318 }
319
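/*
 * Illustrative mapping of the four cases above (hypothetical chain): for
 * a free rbtree node N with a same-size list N -> A -> B,
 *
 *	taking off B hits case 3 (list tail, rbtree untouched),
 *	taking off A hits case 4 (list middle, rbtree untouched),
 *	taking off N hits case 2 (N erased, A becomes the rbtree node),
 *	and a node with no list hits case 1 (plain rb_erase()).
 */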
320 static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
321 struct hmm_buffer_object *next_bo)
322 {
323 struct hmm_bo_device *bdev;
324 unsigned long flags;
325
326 bdev = bo->bdev;
327 next_bo->start = bo->start;
328 next_bo->pgnr = next_bo->pgnr + bo->pgnr;
329
330 spin_lock_irqsave(&bdev->list_lock, flags);
331 list_del(&bo->list);
332 spin_unlock_irqrestore(&bdev->list_lock, flags);
333
334 kmem_cache_free(bo->bdev->bo_cache, bo);
335
336 return next_bo;
337 }
338
339 /*
340 * hmm_bo_device functions.
341 */
342 int hmm_bo_device_init(struct hmm_bo_device *bdev,
343 struct isp_mmu_client *mmu_driver,
344 unsigned int vaddr_start,
345 unsigned int size)
346 {
347 struct hmm_buffer_object *bo;
348 unsigned long flags;
349 int ret;
350
351 check_bodev_null_return(bdev, -EINVAL);
352
353 ret = isp_mmu_init(&bdev->mmu, mmu_driver);
354 if (ret) {
355 dev_err(atomisp_dev, "isp_mmu_init failed.\n");
356 return ret;
357 }
358
359 bdev->start = vaddr_start;
360 bdev->pgnr = size_to_pgnr_ceil(size);
361 bdev->size = pgnr_to_size(bdev->pgnr);
362
363 spin_lock_init(&bdev->list_lock);
364 mutex_init(&bdev->rbtree_mutex);
365
366 bdev->flag = HMM_BO_DEVICE_INITED;
367
368 INIT_LIST_HEAD(&bdev->entire_bo_list);
369 bdev->allocated_rbtree = RB_ROOT;
370 bdev->free_rbtree = RB_ROOT;
371
372 bdev->bo_cache = kmem_cache_create("bo_cache",
373 sizeof(struct hmm_buffer_object), 0, 0, NULL);
374 if (!bdev->bo_cache) {
375 dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
376 isp_mmu_exit(&bdev->mmu);
377 return -ENOMEM;
378 }
379
380 bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
381 if (!bo) {
382 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
383 isp_mmu_exit(&bdev->mmu);
384 return -ENOMEM;
385 }
386
387 ret = __bo_init(bdev, bo, bdev->pgnr);
388 if (ret) {
389 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
390 kmem_cache_free(bdev->bo_cache, bo);
391 isp_mmu_exit(&bdev->mmu);
392 return -EINVAL;
393 }
394
395 spin_lock_irqsave(&bdev->list_lock, flags);
396 list_add_tail(&bo->list, &bdev->entire_bo_list);
397 spin_unlock_irqrestore(&bdev->list_lock, flags);
398
399 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
400
401 return 0;
402 }
403
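/*
 * Typical life cycle sketch (illustrative only; the mmu client and the
 * ISP_VM_START/ISP_VM_SIZE range are assumed to be provided elsewhere);
 * hmm_bo_alloc() below reserves 16 pages of ISP virtual address space:
 *
 *	struct hmm_bo_device bdev;
 *	struct hmm_buffer_object *bo;
 *
 *	if (!hmm_bo_device_init(&bdev, &isp_mmu_client, ISP_VM_START,
 *				ISP_VM_SIZE)) {
 *		bo = hmm_bo_alloc(&bdev, 16);
 *		if (bo)
 *			hmm_bo_release(bo);
 *		hmm_bo_device_exit(&bdev);
 *	}
 */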
404 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
405 unsigned int pgnr)
406 {
407 struct hmm_buffer_object *bo, *new_bo;
408 struct rb_root *root = &bdev->free_rbtree;
409
410 check_bodev_null_return(bdev, NULL);
411 var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
412 "hmm_bo_device not inited yet.\n");
413
414 if (pgnr == 0) {
415 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
416 return NULL;
417 }
418
419 mutex_lock(&bdev->rbtree_mutex);
420 bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
421 if (!bo) {
422 mutex_unlock(&bdev->rbtree_mutex);
423 dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
424 __func__);
425 return NULL;
426 }
427
428 if (bo->pgnr > pgnr) {
429 new_bo = __bo_break_up(bdev, bo, pgnr);
430 if (!new_bo) {
431 mutex_unlock(&bdev->rbtree_mutex);
432 dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
433 __func__);
434 return NULL;
435 }
436
437 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
438 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
439
440 mutex_unlock(&bdev->rbtree_mutex);
441 return new_bo;
442 }
443
444 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
445
446 mutex_unlock(&bdev->rbtree_mutex);
447 return bo;
448 }
449
450 void hmm_bo_release(struct hmm_buffer_object *bo)
451 {
452 struct hmm_bo_device *bdev = bo->bdev;
453 struct hmm_buffer_object *next_bo, *prev_bo;
454
455 mutex_lock(&bdev->rbtree_mutex);
456
457 /*
458 * FIX ME:
459 *
460 * how to destroy the bo when it is still MMAPED?
461 *
462 * ideally, this will not happen as hmm_bo_release
463 * will only be called when kref reaches 0, and in the mmap
464 * operation hmm_bo_ref will eventually be called.
465 * so, if this happens, something has gone wrong.
466 */
467 if (bo->status & HMM_BO_MMAPED) {
468 mutex_unlock(&bdev->rbtree_mutex);
469 dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
470 return;
471 }
472
473 if (bo->status & HMM_BO_BINDED) {
474 dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
475 hmm_bo_unbind(bo);
476 }
477
478 if (bo->status & HMM_BO_PAGE_ALLOCED) {
479 dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
480 hmm_bo_free_pages(bo);
481 }
482 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
483 dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
484 hmm_bo_vunmap(bo);
485 }
486
487 rb_erase(&bo->node, &bdev->allocated_rbtree);
488
489 prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
490 next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
491
492 if (bo->list.prev != &bdev->entire_bo_list &&
493 prev_bo->end == bo->start &&
494 (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
495 __bo_take_off_handling(prev_bo);
496 bo = __bo_merge(prev_bo, bo);
497 }
498
499 if (bo->list.next != &bdev->entire_bo_list &&
500 next_bo->start == bo->end &&
501 (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
502 __bo_take_off_handling(next_bo);
503 bo = __bo_merge(bo, next_bo);
504 }
505
506 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
507
508 mutex_unlock(&bdev->rbtree_mutex);
509 return;
510 }
511
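/*
 * Illustrative coalescing (hypothetical addresses): releasing the middle
 * bo of three contiguous entries
 *
 *	[0x1000-0x2000 FREE][0x2000-0x3000 ALLOCED][0x3000-0x4000 FREE]
 *
 * merges it with both free neighbours in hmm_bo_release(), so one free bo
 * covering 0x1000-0x4000 goes back onto the free rbtree.
 */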
512 void hmm_bo_device_exit(struct hmm_bo_device *bdev)
513 {
514 struct hmm_buffer_object *bo;
515 unsigned long flags;
516
517 dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
518
519 check_bodev_null_return_void(bdev);
520
521 /*
522 * release all allocated bos even if they are in use;
523 * all bos will be merged into one big bo
524 */
525 while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
526 hmm_bo_release(
527 rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
528
529 dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
530 __func__);
531
532 /* free all bos to release all ISP virtual memory */
533 while (!list_empty(&bdev->entire_bo_list)) {
534 bo = list_to_hmm_bo(bdev->entire_bo_list.next);
535
536 spin_lock_irqsave(&bdev->list_lock, flags);
537 list_del(&bo->list);
538 spin_unlock_irqrestore(&bdev->list_lock, flags);
539
540 kmem_cache_free(bdev->bo_cache, bo);
541 }
542
543 dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);
544
545 kmem_cache_destroy(bdev->bo_cache);
546
547 isp_mmu_exit(&bdev->mmu);
548 }
549
550 int hmm_bo_device_inited(struct hmm_bo_device *bdev)
551 {
552 check_bodev_null_return(bdev, -EINVAL);
553
554 return bdev->flag == HMM_BO_DEVICE_INITED;
555 }
556
557 int hmm_bo_allocated(struct hmm_buffer_object *bo)
558 {
559 check_bo_null_return(bo, 0);
560
561 return bo->status & HMM_BO_ALLOCED;
562 }
563
564 struct hmm_buffer_object *hmm_bo_device_search_start(
565 struct hmm_bo_device *bdev, ia_css_ptr vaddr)
566 {
567 struct hmm_buffer_object *bo;
568
569 check_bodev_null_return(bdev, NULL);
570
571 mutex_lock(&bdev->rbtree_mutex);
572 bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
573 if (!bo) {
574 mutex_unlock(&bdev->rbtree_mutex);
575 dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
576 __func__, vaddr);
577 return NULL;
578 }
579 mutex_unlock(&bdev->rbtree_mutex);
580
581 return bo;
582 }
583
584 struct hmm_buffer_object *hmm_bo_device_search_in_range(
585 struct hmm_bo_device *bdev, unsigned int vaddr)
586 {
587 struct hmm_buffer_object *bo;
588
589 check_bodev_null_return(bdev, NULL);
590
591 mutex_lock(&bdev->rbtree_mutex);
592 bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
593 if (!bo) {
594 mutex_unlock(&bdev->rbtree_mutex);
595 dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n",
596 __func__, vaddr);
597 return NULL;
598 }
599 mutex_unlock(&bdev->rbtree_mutex);
600
601 return bo;
602 }
603
604 struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
605 struct hmm_bo_device *bdev, const void *vaddr)
606 {
607 struct list_head *pos;
608 struct hmm_buffer_object *bo;
609 unsigned long flags;
610
611 check_bodev_null_return(bdev, NULL);
612
613 spin_lock_irqsave(&bdev->list_lock, flags);
614 list_for_each(pos, &bdev->entire_bo_list) {
615 bo = list_to_hmm_bo(pos);
616 /* skip bo which has no vm_node allocated */
617 if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
618 continue;
619 if (bo->vmap_addr == vaddr)
620 goto found;
621 }
622 spin_unlock_irqrestore(&bdev->list_lock, flags);
623 return NULL;
624 found:
625 spin_unlock_irqrestore(&bdev->list_lock, flags);
626 return bo;
627 }
628
629 static void free_private_bo_pages(struct hmm_buffer_object *bo,
630 struct hmm_pool *dypool,
631 struct hmm_pool *repool,
632 int free_pgnr)
633 {
634 int i, ret;
635
636 for (i = 0; i < free_pgnr; i++) {
637 switch (bo->page_obj[i].type) {
638 case HMM_PAGE_TYPE_RESERVED:
639 if (repool->pops
640 && repool->pops->pool_free_pages) {
641 repool->pops->pool_free_pages(repool->pool_info,
642 &bo->page_obj[i]);
643 hmm_mem_stat.res_cnt--;
644 }
645 break;
646 /*
647 * HMM_PAGE_TYPE_GENERAL indicates that the pages are from system
648 * memory, so when freeing them, they should be put into the dynamic
649 * pool.
650 */
651 case HMM_PAGE_TYPE_DYNAMIC:
652 case HMM_PAGE_TYPE_GENERAL:
653 if (dypool->pops
654 && dypool->pops->pool_inited
655 && dypool->pops->pool_inited(dypool->pool_info)) {
656 if (dypool->pops->pool_free_pages)
657 dypool->pops->pool_free_pages(
658 dypool->pool_info,
659 &bo->page_obj[i]);
660 break;
661 }
662
663 fallthrough;
664
665 /*
666 * if dynamic memory pool doesn't exist, need to free
667 * pages to system directly.
668 */
669 default:
670 ret = set_pages_wb(bo->page_obj[i].page, 1);
671 if (ret)
672 dev_err(atomisp_dev,
673 "set page to WB err ...ret = %d\n",
674 ret);
675 /*
676 W/A: set_pages_wb occasionally returns -EFAULT, which
677 indicates that the page address is not in the valid
678 range (0xffff880000000000~0xffffc7ffffffffff);
679 __free_pages would then panic. It is not known why the page
680 address is invalid; it may be memory corruption caused by low memory
681 */
682 if (!ret) {
683 __free_pages(bo->page_obj[i].page, 0);
684 hmm_mem_stat.sys_size--;
685 }
686 break;
687 }
688 }
689
690 return;
691 }
692
693 /* Allocate pages which will be used only by ISP */
694 static int alloc_private_pages(struct hmm_buffer_object *bo,
695 int from_highmem,
696 bool cached,
697 struct hmm_pool *dypool,
698 struct hmm_pool *repool)
699 {
700 int ret;
701 unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
702 struct page *pages;
703 gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
704 int i, j;
705 int failure_number = 0;
706 bool reduce_order = false;
707 bool lack_mem = true;
708
709 if (from_highmem)
710 gfp |= __GFP_HIGHMEM;
711
712 pgnr = bo->pgnr;
713
714 bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
715 GFP_KERNEL);
716 if (unlikely(!bo->page_obj))
717 return -ENOMEM;
718
719 i = 0;
720 alloc_pgnr = 0;
721
722 /*
723 * get physical pages from dynamic pages pool.
724 */
725 if (dypool->pops && dypool->pops->pool_alloc_pages) {
726 alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
727 bo->page_obj, pgnr,
728 cached);
729 hmm_mem_stat.dyc_size -= alloc_pgnr;
730
731 if (alloc_pgnr == pgnr)
732 return 0;
733 }
734
735 pgnr -= alloc_pgnr;
736 i += alloc_pgnr;
737
738 /*
739 * get physical pages from reserved pages pool for atomisp.
740 */
741 if (repool->pops && repool->pops->pool_alloc_pages) {
742 alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
743 &bo->page_obj[i], pgnr,
744 cached);
745 hmm_mem_stat.res_cnt += alloc_pgnr;
746 if (alloc_pgnr == pgnr)
747 return 0;
748 }
749
750 pgnr -= alloc_pgnr;
751 i += alloc_pgnr;
752
753 while (pgnr) {
754 order = nr_to_order_bottom(pgnr);
755 /*
756 * if we are short of memory, we will set order to 0
757 * every time.
758 */
759 if (lack_mem)
760 order = HMM_MIN_ORDER;
761 else if (order > HMM_MAX_ORDER)
762 order = HMM_MAX_ORDER;
763 retry:
764 /*
765 * When order > HMM_MIN_ORDER, for performance reasons we don't
766 * want alloc_pages() to sleep. In case it fails and falls back
767 * to HMM_MIN_ORDER, or in case the requested order is originally
768 * the minimum value, we can allow alloc_pages() to sleep for
769 * robustness purposes.
770 *
771 * REVISIT: why is __GFP_FS necessary?
772 */
773 if (order == HMM_MIN_ORDER) {
774 gfp &= ~GFP_NOWAIT;
775 gfp |= __GFP_RECLAIM | __GFP_FS;
776 }
777
778 pages = alloc_pages(gfp, order);
779 if (unlikely(!pages)) {
780 /*
781 * in the low memory case, if page allocation fails,
782 * we retry to see if an order=0 allocation could
783 * succeed. if order=0 fails too, that means there is
784 * no memory left.
785 */
786 if (order == HMM_MIN_ORDER) {
787 dev_err(atomisp_dev,
788 "%s: cannot allocate pages\n",
789 __func__);
790 goto cleanup;
791 }
792 order = HMM_MIN_ORDER;
793 failure_number++;
794 reduce_order = true;
795 /*
796 * if allocation fails twice in a row, we assume we are
797 * short of memory now.
798 */
799 if (failure_number == 2) {
800 lack_mem = true;
801 failure_number = 0;
802 }
803 goto retry;
804 } else {
805 blk_pgnr = order_to_nr(order);
806
807 if (!cached) {
808 /*
809 * set memory to uncacheable -- UC_MINUS
810 */
811 ret = set_pages_uc(pages, blk_pgnr);
812 if (ret) {
813 dev_err(atomisp_dev,
814 "set page uncacheablefailed.\n");
815
816 __free_pages(pages, order);
817
818 goto cleanup;
819 }
820 }
821
822 for (j = 0; j < blk_pgnr; j++) {
823 bo->page_obj[i].page = pages + j;
824 bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
825 }
826
827 pgnr -= blk_pgnr;
828 hmm_mem_stat.sys_size += blk_pgnr;
829
830 /*
831 * if order is not reduced this time, clear
832 * failure_number.
833 */
834 if (reduce_order)
835 reduce_order = false;
836 else
837 failure_number = 0;
838 }
839 }
840
841 return 0;
842 cleanup:
843 alloc_pgnr = i;
844 free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
845
846 kfree(bo->page_obj);
847
848 return -ENOMEM;
849 }
850
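/*
 * Illustrative walk-through of the loop above (hypothetical 12-page
 * request): since lack_mem starts out true, every iteration asks for
 * order HMM_MIN_ORDER with reclaim and __GFP_FS enabled, i.e. the request
 * is served one minimum-order block at a time:
 *
 *	order = HMM_MIN_ORDER; blk_pgnr = order_to_nr(order); pgnr -= blk_pgnr;
 *
 * The order-reduction/retry path only matters if lack_mem is ever cleared
 * so that nr_to_order_bottom() can pick a larger starting order.
 */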
851 static void free_private_pages(struct hmm_buffer_object *bo,
852 struct hmm_pool *dypool,
853 struct hmm_pool *repool)
854 {
855 free_private_bo_pages(bo, dypool, repool, bo->pgnr);
856
857 kfree(bo->page_obj);
858 }
859
860 static void free_user_pages(struct hmm_buffer_object *bo)
861 {
862 int i;
863
864 hmm_mem_stat.usr_size -= bo->pgnr;
865
866 if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
867 unpin_user_pages(bo->pages, bo->pgnr);
868 } else {
869 for (i = 0; i < bo->pgnr; i++)
870 put_page(bo->pages[i]);
871 }
872 kfree(bo->pages);
873 kfree(bo->page_obj);
874 }
875
876 /*
877 * Convert user space virtual address into pages list
878 */
879 static int alloc_user_pages(struct hmm_buffer_object *bo,
880 const void __user *userptr, bool cached)
881 {
882 int page_nr;
883 int i;
884 struct vm_area_struct *vma;
885 struct page **pages;
886
887 pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
888 if (unlikely(!pages))
889 return -ENOMEM;
890
891 bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
892 GFP_KERNEL);
893 if (unlikely(!bo->page_obj)) {
894 kfree(pages);
895 return -ENOMEM;
896 }
897
898 mutex_unlock(&bo->mutex);
899 mmap_read_lock(current->mm);
900 vma = find_vma(current->mm, (unsigned long)userptr);
901 mmap_read_unlock(current->mm);
902 if (!vma) {
903 dev_err(atomisp_dev, "find_vma failed\n");
904 kfree(bo->page_obj);
905 kfree(pages);
906 mutex_lock(&bo->mutex);
907 return -EFAULT;
908 }
909 mutex_lock(&bo->mutex);
910 /*
911 * Handle a frame buffer allocated by another kernel-space driver
912 * and mapped to user space
913 */
914
915 userptr = untagged_addr(userptr);
916
917 bo->pages = pages;
918
919 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
920 page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr,
921 FOLL_LONGTERM | FOLL_WRITE,
922 pages, NULL);
923 bo->mem_type = HMM_BO_MEM_TYPE_PFN;
924 } else {
925 /*Handle frame buffer allocated in user space*/
926 mutex_unlock(&bo->mutex);
927 page_nr = get_user_pages_fast((unsigned long)userptr,
928 (int)(bo->pgnr), 1, pages);
929 mutex_lock(&bo->mutex);
930 bo->mem_type = HMM_BO_MEM_TYPE_USER;
931 }
932
933 dev_dbg(atomisp_dev, "%s: %d %s pages were allocated as 0x%08x\n",
934 __func__,
935 bo->pgnr,
936 bo->mem_type == HMM_BO_MEM_TYPE_USER ? "user" : "pfn", page_nr);
937
938 hmm_mem_stat.usr_size += bo->pgnr;
939
940 /* can be written by caller, not forced */
941 if (page_nr != bo->pgnr) {
942 dev_err(atomisp_dev,
943 "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
944 bo->pgnr, page_nr);
945 goto out_of_mem;
946 }
947
948 for (i = 0; i < bo->pgnr; i++) {
949 bo->page_obj[i].page = pages[i];
950 bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
951 }
952
953 return 0;
954
955 out_of_mem:
956
957 free_user_pages(bo);
958
959 return -ENOMEM;
960 }
961
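/*
 * Summary of the decision above: VM_IO/VM_PFNMAP vmas (e.g. a frame
 * buffer exported by another driver and mapped to user space) are pinned
 * long-term with pin_user_pages(); ordinary user memory is taken with
 * get_user_pages_fast(). Both paths must deliver exactly bo->pgnr pages,
 * otherwise the allocation is rolled back through free_user_pages().
 */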
962 /*
963 * allocate/free physical pages for the bo.
964 *
965 * type indicates where the pages are from. Currently we have 3 types
966 * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
967 *
968 * from_highmem is only valid when type is HMM_BO_PRIVATE, it will
969 * try to alloc memory from highmem if from_highmem is set.
970 *
971 * userptr is only valid when type is HMM_BO_USER, it indicates
972 * the start address from user space task.
973 *
974 * from_highmem and userptr will both be ignored when type is
975 * HMM_BO_SHARE.
976 */
977 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
978 enum hmm_bo_type type, int from_highmem,
979 const void __user *userptr, bool cached)
980 {
981 int ret = -EINVAL;
982
983 check_bo_null_return(bo, -EINVAL);
984
985 mutex_lock(&bo->mutex);
986 check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
987
988 /*
989 * TO DO:
990 * add HMM_BO_USER type
991 */
992 if (type == HMM_BO_PRIVATE) {
993 ret = alloc_private_pages(bo, from_highmem,
994 cached, &dynamic_pool, &reserved_pool);
995 } else if (type == HMM_BO_USER) {
996 ret = alloc_user_pages(bo, userptr, cached);
997 } else {
998 dev_err(atomisp_dev, "invalid buffer type.\n");
999 ret = -EINVAL;
1000 }
1001 if (ret)
1002 goto alloc_err;
1003
1004 bo->type = type;
1005
1006 bo->status |= HMM_BO_PAGE_ALLOCED;
1007
1008 mutex_unlock(&bo->mutex);
1009
1010 return 0;
1011
1012 alloc_err:
1013 mutex_unlock(&bo->mutex);
1014 dev_err(atomisp_dev, "alloc pages err...\n");
1015 return ret;
1016 status_err:
1017 mutex_unlock(&bo->mutex);
1018 dev_err(atomisp_dev,
1019 "buffer object has already page allocated.\n");
1020 return -EINVAL;
1021 }
1022
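/*
 * Typical buffer setup sketch (illustrative; 'bdev', 'pgnr', 'isp_virt'
 * and the error handling are assumed to exist in the caller):
 *
 *	bo = hmm_bo_alloc(bdev, pgnr);
 *	if (bo && !hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false) &&
 *	    !hmm_bo_bind(bo))
 *		isp_virt = bo->start;
 *
 * Teardown runs in the opposite order: hmm_bo_unbind(), hmm_bo_free_pages()
 * and finally hmm_bo_unref() or hmm_bo_release().
 */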
1023 /*
1024 * free physical pages of the bo.
1025 */
1026 void hmm_bo_free_pages(struct hmm_buffer_object *bo)
1027 {
1028 check_bo_null_return_void(bo);
1029
1030 mutex_lock(&bo->mutex);
1031
1032 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
1033
1034 /* clear the flag anyway. */
1035 bo->status &= (~HMM_BO_PAGE_ALLOCED);
1036
1037 if (bo->type == HMM_BO_PRIVATE)
1038 free_private_pages(bo, &dynamic_pool, &reserved_pool);
1039 else if (bo->type == HMM_BO_USER)
1040 free_user_pages(bo);
1041 else
1042 dev_err(atomisp_dev, "invalid buffer type.\n");
1043 mutex_unlock(&bo->mutex);
1044
1045 return;
1046
1047 status_err2:
1048 mutex_unlock(&bo->mutex);
1049 dev_err(atomisp_dev,
1050 "buffer object not page allocated yet.\n");
1051 }
1052
1053 int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
1054 {
1055 check_bo_null_return(bo, 0);
1056
1057 return bo->status & HMM_BO_PAGE_ALLOCED;
1058 }
1059
1060 /*
1061 * get physical page info of the bo.
1062 */
1063 int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
1064 struct hmm_page_object **page_obj, int *pgnr)
1065 {
1066 check_bo_null_return(bo, -EINVAL);
1067
1068 mutex_lock(&bo->mutex);
1069
1070 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1071
1072 *page_obj = bo->page_obj;
1073 *pgnr = bo->pgnr;
1074
1075 mutex_unlock(&bo->mutex);
1076
1077 return 0;
1078
1079 status_err:
1080 dev_err(atomisp_dev,
1081 "buffer object not page allocated yet.\n");
1082 mutex_unlock(&bo->mutex);
1083 return -EINVAL;
1084 }
1085
1086 /*
1087 * bind the physical pages to a virtual address space.
1088 */
1089 int hmm_bo_bind(struct hmm_buffer_object *bo)
1090 {
1091 int ret;
1092 unsigned int virt;
1093 struct hmm_bo_device *bdev;
1094 unsigned int i;
1095
1096 check_bo_null_return(bo, -EINVAL);
1097
1098 mutex_lock(&bo->mutex);
1099
1100 check_bo_status_yes_goto(bo,
1101 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
1102 status_err1);
1103
1104 check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
1105
1106 bdev = bo->bdev;
1107
1108 virt = bo->start;
1109
1110 for (i = 0; i < bo->pgnr; i++) {
1111 ret =
1112 isp_mmu_map(&bdev->mmu, virt,
1113 page_to_phys(bo->page_obj[i].page), 1);
1114 if (ret)
1115 goto map_err;
1116 virt += (1 << PAGE_SHIFT);
1117 }
1118
1119 /*
1120 * flush TLB here.
1121 *
1122 * theoretically, we do not need to flush the TLB as we did not change
1123 * any existing address mappings, but for Silicon Hive's MMU it is
1124 * really a bug here. I guess when fetching PTEs (page table entries)
1125 * into the TLB, the MMU will fetch additional INVALID PTEs automatically
1126 * for performance reasons. E.g. we only set up 1 page address mapping,
1127 * meaning updating 1 PTE, but the MMU fetches 4 PTEs at one time,
1128 * so the additional 3 PTEs are invalid.
1129 */
1130 if (bo->start != 0x0)
1131 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1132 (bo->pgnr << PAGE_SHIFT));
1133
1134 bo->status |= HMM_BO_BINDED;
1135
1136 mutex_unlock(&bo->mutex);
1137
1138 return 0;
1139
1140 map_err:
1141 /* unbind the physical pages with related virtual address space */
1142 virt = bo->start;
1143 for ( ; i > 0; i--) {
1144 isp_mmu_unmap(&bdev->mmu, virt, 1);
1145 virt += pgnr_to_size(1);
1146 }
1147
1148 mutex_unlock(&bo->mutex);
1149 dev_err(atomisp_dev,
1150 "setup MMU address mapping failed.\n");
1151 return ret;
1152
1153 status_err2:
1154 mutex_unlock(&bo->mutex);
1155 dev_err(atomisp_dev, "buffer object already bound.\n");
1156 return -EINVAL;
1157 status_err1:
1158 mutex_unlock(&bo->mutex);
1159 dev_err(atomisp_dev,
1160 "buffer object vm_node or page not allocated.\n");
1161 return -EINVAL;
1162 }
1163
1164 /*
1165 * unbind the physical pages with related virtual address space.
1166 */
1167 void hmm_bo_unbind(struct hmm_buffer_object *bo)
1168 {
1169 unsigned int virt;
1170 struct hmm_bo_device *bdev;
1171 unsigned int i;
1172
1173 check_bo_null_return_void(bo);
1174
1175 mutex_lock(&bo->mutex);
1176
1177 check_bo_status_yes_goto(bo,
1178 HMM_BO_PAGE_ALLOCED |
1179 HMM_BO_ALLOCED |
1180 HMM_BO_BINDED, status_err);
1181
1182 bdev = bo->bdev;
1183
1184 virt = bo->start;
1185
1186 for (i = 0; i < bo->pgnr; i++) {
1187 isp_mmu_unmap(&bdev->mmu, virt, 1);
1188 virt += pgnr_to_size(1);
1189 }
1190
1191 /*
1192 * flush TLB as the address mapping has been removed and
1193 * related TLBs should be invalidated.
1194 */
1195 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1196 (bo->pgnr << PAGE_SHIFT));
1197
1198 bo->status &= (~HMM_BO_BINDED);
1199
1200 mutex_unlock(&bo->mutex);
1201
1202 return;
1203
1204 status_err:
1205 mutex_unlock(&bo->mutex);
1206 dev_err(atomisp_dev,
1207 "buffer vm or page not allocated or not binded yet.\n");
1208 }
1209
1210 int hmm_bo_binded(struct hmm_buffer_object *bo)
1211 {
1212 int ret;
1213
1214 check_bo_null_return(bo, 0);
1215
1216 mutex_lock(&bo->mutex);
1217
1218 ret = bo->status & HMM_BO_BINDED;
1219
1220 mutex_unlock(&bo->mutex);
1221
1222 return ret;
1223 }
1224
1225 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
1226 {
1227 struct page **pages;
1228 int i;
1229
1230 check_bo_null_return(bo, NULL);
1231
1232 mutex_lock(&bo->mutex);
1233 if (((bo->status & HMM_BO_VMAPED) && !cached) ||
1234 ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
1235 mutex_unlock(&bo->mutex);
1236 return bo->vmap_addr;
1237 }
1238
1239 /* the cached status needs to change, so vunmap first */
1240 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1241 vunmap(bo->vmap_addr);
1242 bo->vmap_addr = NULL;
1243 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1244 }
1245
1246 pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
1247 if (unlikely(!pages)) {
1248 mutex_unlock(&bo->mutex);
1249 return NULL;
1250 }
1251
1252 for (i = 0; i < bo->pgnr; i++)
1253 pages[i] = bo->page_obj[i].page;
1254
1255 bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
1256 cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
1257 if (unlikely(!bo->vmap_addr)) {
1258 kfree(pages);
1259 mutex_unlock(&bo->mutex);
1260 dev_err(atomisp_dev, "vmap failed...\n");
1261 return NULL;
1262 }
1263 bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
1264
1265 kfree(pages);
1266
1267 mutex_unlock(&bo->mutex);
1268 return bo->vmap_addr;
1269 }
1270
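/*
 * Illustrative CPU-access sketch: a cached kernel mapping can be created
 * with hmm_bo_vmap(bo, true) and must be flushed before the ISP reads the
 * data ('src' and 'len' below are hypothetical):
 *
 *	void *va = hmm_bo_vmap(bo, true);
 *	if (va) {
 *		memcpy(va, src, len);
 *		hmm_bo_flush_vmap(bo);
 *	}
 */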
1271 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
1272 {
1273 check_bo_null_return_void(bo);
1274
1275 mutex_lock(&bo->mutex);
1276 if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
1277 mutex_unlock(&bo->mutex);
1278 return;
1279 }
1280
1281 clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
1282 mutex_unlock(&bo->mutex);
1283 }
1284
1285 void hmm_bo_vunmap(struct hmm_buffer_object *bo)
1286 {
1287 check_bo_null_return_void(bo);
1288
1289 mutex_lock(&bo->mutex);
1290 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1291 vunmap(bo->vmap_addr);
1292 bo->vmap_addr = NULL;
1293 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1294 }
1295
1296 mutex_unlock(&bo->mutex);
1297 return;
1298 }
1299
1300 void hmm_bo_ref(struct hmm_buffer_object *bo)
1301 {
1302 check_bo_null_return_void(bo);
1303
1304 kref_get(&bo->kref);
1305 }
1306
1307 static void kref_hmm_bo_release(struct kref *kref)
1308 {
1309 if (!kref)
1310 return;
1311
1312 hmm_bo_release(kref_to_hmm_bo(kref));
1313 }
1314
1315 void hmm_bo_unref(struct hmm_buffer_object *bo)
1316 {
1317 check_bo_null_return_void(bo);
1318
1319 kref_put(&bo->kref, kref_hmm_bo_release);
1320 }
1321
1322 static void hmm_bo_vm_open(struct vm_area_struct *vma)
1323 {
1324 struct hmm_buffer_object *bo =
1325 (struct hmm_buffer_object *)vma->vm_private_data;
1326
1327 check_bo_null_return_void(bo);
1328
1329 hmm_bo_ref(bo);
1330
1331 mutex_lock(&bo->mutex);
1332
1333 bo->status |= HMM_BO_MMAPED;
1334
1335 bo->mmap_count++;
1336
1337 mutex_unlock(&bo->mutex);
1338 }
1339
1340 static void hmm_bo_vm_close(struct vm_area_struct *vma)
1341 {
1342 struct hmm_buffer_object *bo =
1343 (struct hmm_buffer_object *)vma->vm_private_data;
1344
1345 check_bo_null_return_void(bo);
1346
1347 hmm_bo_unref(bo);
1348
1349 mutex_lock(&bo->mutex);
1350
1351 bo->mmap_count--;
1352
1353 if (!bo->mmap_count) {
1354 bo->status &= (~HMM_BO_MMAPED);
1355 vma->vm_private_data = NULL;
1356 }
1357
1358 mutex_unlock(&bo->mutex);
1359 }
1360
1361 static const struct vm_operations_struct hmm_bo_vm_ops = {
1362 .open = hmm_bo_vm_open,
1363 .close = hmm_bo_vm_close,
1364 };
1365
1366 /*
1367 * mmap the bo to user space.
1368 */
1369 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1370 {
1371 unsigned int start, end;
1372 unsigned int virt;
1373 unsigned int pgnr, i;
1374 unsigned int pfn;
1375
1376 check_bo_null_return(bo, -EINVAL);
1377
1378 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1379
1380 pgnr = bo->pgnr;
1381 start = vma->vm_start;
1382 end = vma->vm_end;
1383
1384 /*
1385 * check vma's virtual address space size against the buffer object's
1386 * size; they must be the same.
1387 */
1388 if ((start + pgnr_to_size(pgnr)) != end) {
1389 dev_warn(atomisp_dev,
1390 "vma's address space size not equal to buffer object's size");
1391 return -EINVAL;
1392 }
1393
1394 virt = vma->vm_start;
1395 for (i = 0; i < pgnr; i++) {
1396 pfn = page_to_pfn(bo->page_obj[i].page);
1397 if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
1398 dev_warn(atomisp_dev,
1399 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
1400 virt, pfn, 1);
1401 return -EINVAL;
1402 }
1403 virt += PAGE_SIZE;
1404 }
1405
1406 vma->vm_private_data = bo;
1407
1408 vma->vm_ops = &hmm_bo_vm_ops;
1409 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1410
1411 /*
1412 * call hmm_bo_vm_open explicitly.
1413 */
1414 hmm_bo_vm_open(vma);
1415
1416 return 0;
1417
1418 status_err:
1419 dev_err(atomisp_dev, "buffer page not allocated yet.\n");
1420 return -EINVAL;
1421 }
1422
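/*
 * Illustrative use from a driver's mmap file operation (the bo device and
 * the way the ISP address is derived from vm_pgoff are hypothetical):
 *
 *	static int example_isp_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct hmm_buffer_object *bo;
 *
 *		bo = hmm_bo_device_search_start(&bo_device,
 *						vma->vm_pgoff << PAGE_SHIFT);
 *		if (!bo)
 *			return -EINVAL;
 *		return hmm_bo_mmap(vma, bo);
 *	}
 */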