// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * This file contains entry functions for memory management of ISP driver
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* for kmap */
#include <linux/io.h>		/* for page_to_phys */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include "asm/cacheflush.h"
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"

struct hmm_bo_device bo_device;
struct hmm_pool dynamic_pool;
struct hmm_pool reserved_pool;
static ia_css_ptr dummy_ptr;
static bool hmm_initialized;
struct _hmm_mem_stat hmm_mem_stat;

/*
 * p: private
 * s: shared
 * u: user
 * i: ion
 */
static const char hmm_bo_type_string[] = "psui";

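/*
 * Dump one "<type> <pgnr>" line per buffer object on @bo_list whose
 * HMM_BO_ALLOCED state matches @active, followed by a per-type summary
 * (pages are 4 KiB, hence the "* 4" when printing KB). This backs the
 * active_bo and free_bo sysfs attributes defined below.
 */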
static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
                       char *buf, struct list_head *bo_list, bool active)
{
        ssize_t ret = 0;
        struct hmm_buffer_object *bo;
        unsigned long flags;
        int i;
        long total[HMM_BO_LAST] = { 0 };
        long count[HMM_BO_LAST] = { 0 };
        int index1 = 0;
        int index2 = 0;

        ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
        if (ret <= 0)
                return 0;

        index1 += ret;

        spin_lock_irqsave(&bo_device.list_lock, flags);
        list_for_each_entry(bo, bo_list, list) {
                if ((active && (bo->status & HMM_BO_ALLOCED)) ||
                    (!active && !(bo->status & HMM_BO_ALLOCED))) {
                        ret = scnprintf(buf + index1, PAGE_SIZE - index1,
                                        "%c %d\n",
                                        hmm_bo_type_string[bo->type], bo->pgnr);

                        total[bo->type] += bo->pgnr;
                        count[bo->type]++;
                        if (ret > 0)
                                index1 += ret;
                }
        }
        spin_unlock_irqrestore(&bo_device.list_lock, flags);

        for (i = 0; i < HMM_BO_LAST; i++) {
                if (count[i]) {
                        ret = scnprintf(buf + index1 + index2,
                                        PAGE_SIZE - index1 - index2,
                                        "%ld %c buffer objects: %ld KB\n",
                                        count[i], hmm_bo_type_string[i],
                                        total[i] * 4);
                        if (ret > 0)
                                index2 += ret;
                }
        }

        /* Add trailing zero, not included by scnprintf */
        return index1 + index2 + 1;
}

static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
}

static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}

static ssize_t reserved_pool_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        ssize_t ret = 0;

        struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
        unsigned long flags;

        if (!pinfo || !pinfo->initialized)
                return 0;

        spin_lock_irqsave(&pinfo->list_lock, flags);
        ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
                        pinfo->index, pinfo->pgnr);
        spin_unlock_irqrestore(&pinfo->list_lock, flags);

        if (ret > 0)
                ret++; /* Add trailing zero, not included by scnprintf */

        return ret;
}

static ssize_t dynamic_pool_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        ssize_t ret = 0;

        struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
        unsigned long flags;

        if (!pinfo || !pinfo->initialized)
                return 0;

        spin_lock_irqsave(&pinfo->list_lock, flags);
        ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
                        pinfo->pgnr, pinfo->pool_size);
        spin_unlock_irqrestore(&pinfo->list_lock, flags);

        if (ret > 0)
                ret++; /* Add trailing zero, not included by scnprintf */

        return ret;
}

static DEVICE_ATTR_RO(active_bo);
static DEVICE_ATTR_RO(free_bo);
static DEVICE_ATTR_RO(reserved_pool);
static DEVICE_ATTR_RO(dynamic_pool);

static struct attribute *sysfs_attrs_ctrl[] = {
        &dev_attr_active_bo.attr,
        &dev_attr_free_bo.attr,
        &dev_attr_reserved_pool.attr,
        &dev_attr_dynamic_pool.attr,
        NULL
};

static struct attribute_group atomisp_attribute_group[] = {
        {.attrs = sysfs_attrs_ctrl },
};

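/*
 * Initialise the hmm buffer-object device covering the ISP virtual
 * address range [ISP_VM_START, ISP_VM_START + ISP_VM_SIZE) and register
 * the sysfs attributes above. A one-page dummy buffer is allocated first
 * so that no real allocation can ever be handed out at address 0, which
 * hmm (like ISP_VM_START itself) uses to mean "invalid ISP address".
 */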
int hmm_init(void)
{
        int ret;

        ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
                                 ISP_VM_START, ISP_VM_SIZE);
        if (ret)
                dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

        hmm_initialized = true;

        /*
         * hmm uses NULL to indicate an invalid ISP virtual address, and
         * ISP_VM_START is defined as 0 as well, so allocate one piece of
         * dummy memory up front, which takes address 0, to keep
         * hmm_alloc() from ever returning 0 for a later allocation.
         */
        dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, 0);

        if (!ret) {
                ret = sysfs_create_group(&atomisp_dev->kobj,
                                         atomisp_attribute_group);
                if (ret)
                        dev_err(atomisp_dev,
                                "%s Failed to create sysfs\n", __func__);
        }

        return ret;
}

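/*
 * Tear down in reverse order of hmm_init(): drop the sysfs group, free
 * the dummy buffer object and release the buffer-object device. A zero
 * dummy_ptr is treated as "hmm_init() never completed", so there is
 * nothing to undo in that case.
 */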
void hmm_cleanup(void)
{
        if (!dummy_ptr)
                return;
        sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);

        /* free dummy memory first */
        hmm_free(dummy_ptr);
        dummy_ptr = 0;

        hmm_bo_device_exit(&bo_device);
        hmm_initialized = false;
}

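/*
 * Allocate @bytes of ISP-visible memory and return its ISP virtual
 * address (0 on failure): create a buffer object sized in whole pages,
 * back it with pages of the requested @type, bind it into the ISP MMU
 * and optionally clear it when ATOMISP_MAP_FLAG_CLEARED is set.
 *
 * Illustrative use only (the size and flag combination shown here is
 * made up for the example):
 *
 *	ia_css_ptr addr = hmm_alloc(4096, HMM_BO_PRIVATE, 0, NULL,
 *				    ATOMISP_MAP_FLAG_CACHED);
 *	if (addr)
 *		hmm_free(addr);
 */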
ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
                     int from_highmem, const void __user *userptr,
                     const uint16_t attrs)
{
        unsigned int pgnr;
        struct hmm_buffer_object *bo;
        bool cached = attrs & ATOMISP_MAP_FLAG_CACHED;
        int ret;

        WARN_ON(attrs & ATOMISP_MAP_FLAG_CONTIGUOUS);

        /*
         * Check if we are initialized. In the ideal world we wouldn't need
         * this but we can tackle it once the driver is a lot cleaner
         */
        if (!hmm_initialized)
                hmm_init();

        /* Get page number from size */
        pgnr = size_to_pgnr_ceil(bytes);

        /* Buffer object structure init */
        bo = hmm_bo_alloc(&bo_device, pgnr);
        if (!bo) {
                dev_err(atomisp_dev, "hmm_bo_create failed.\n");
                goto create_bo_err;
        }

        /* Allocate pages for memory */
        ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
        if (ret) {
                dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
                goto alloc_page_err;
        }

        /* Combine the virtual address and pages together */
        ret = hmm_bo_bind(bo);
        if (ret) {
                dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
                goto bind_err;
        }

        hmm_mem_stat.tol_cnt += pgnr;

        if (attrs & ATOMISP_MAP_FLAG_CLEARED)
                hmm_set(bo->start, 0, bytes);

        dev_dbg(atomisp_dev,
                "%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
                __func__, bo->start, bytes, type, from_highmem, userptr, cached);

        return bo->start;

bind_err:
        hmm_bo_free_pages(bo);
alloc_page_err:
        hmm_bo_unref(bo);
create_bo_err:
        return 0;
}

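/*
 * Release memory previously returned by hmm_alloc(): look the buffer
 * object up by its ISP start address, unbind it from the ISP MMU, free
 * its pages and drop the reference taken at allocation time.
 */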
void hmm_free(ia_css_ptr virt)
{
        struct hmm_buffer_object *bo;

        dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt);

        WARN_ON(!virt);

        bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);

        if (!bo) {
                dev_err(atomisp_dev,
                        "can not find buffer object start with address 0x%x\n",
                        (unsigned int)virt);
                return;
        }

        hmm_mem_stat.tol_cnt -= bo->pgnr;

        hmm_bo_unbind(bo);
        hmm_bo_free_pages(bo);
        hmm_bo_unref(bo);
}

static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
        if (!bo) {
                dev_err(atomisp_dev,
                        "can not find buffer object contains address 0x%x\n",
                        ptr);
                return -EINVAL;
        }

        if (!hmm_bo_page_allocated(bo)) {
                dev_err(atomisp_dev,
                        "buffer object has no page allocated.\n");
                return -EINVAL;
        }

        if (!hmm_bo_allocated(bo)) {
                dev_err(atomisp_dev,
                        "buffer object has no virtual address space allocated.\n");
                return -EINVAL;
        }

        return 0;
}

/* Read function in ISP memory management */
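/*
 * Fallback read path used when the buffer object cannot be vmap'ed:
 * walk the buffer page by page, kmap each page, copy at most up to the
 * next page boundary into @data (skipped when @data is NULL, i.e. a
 * flush-only call) and flush the CPU cache for the touched range.
 */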
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
                                  unsigned int bytes)
{
        struct hmm_buffer_object *bo;
        unsigned int idx, offset, len;
        char *src, *des;
        int ret;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        ret = hmm_check_bo(bo, virt);
        if (ret)
                return ret;

        des = (char *)data;
        while (bytes) {
                idx = (virt - bo->start) >> PAGE_SHIFT;
                offset = (virt - bo->start) - (idx << PAGE_SHIFT);

                src = (char *)kmap(bo->page_obj[idx].page) + offset;

                if ((bytes + offset) >= PAGE_SIZE) {
                        len = PAGE_SIZE - offset;
                        bytes -= len;
                } else {
                        len = bytes;
                        bytes = 0;
                }

                virt += len;	/* update virt for next loop */

                if (des) {
                        memcpy(des, src, len);
                        des += len;
                }

                clflush_cache_range(src, len);

                kunmap(bo->page_obj[idx].page);
        }

        return 0;
}

/* Read function in ISP memory management */
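/*
 * Common backend for hmm_load() and hmm_flush(): if the buffer object is
 * already vmap'ed, copy straight from the vmap address (flushing the
 * cache for cached mappings); otherwise create a temporary mapping via
 * hmm_bo_vmap() and fall back to the per-page kmap path above if that
 * fails.
 */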
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
        struct hmm_buffer_object *bo;
        int ret;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        ret = hmm_check_bo(bo, virt);
        if (ret)
                return ret;

        if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
                void *src = bo->vmap_addr;

                src += (virt - bo->start);
                /* data may be NULL for a flush-only (hmm_flush) call */
                if (data)
                        memcpy(data, src, bytes);
                if (bo->status & HMM_BO_VMAPED_CACHED)
                        clflush_cache_range(src, bytes);
        } else {
                void *vptr;

                vptr = hmm_bo_vmap(bo, true);
                if (!vptr)
                        return load_and_flush_by_kmap(virt, data, bytes);

                vptr = vptr + (virt - bo->start);

                if (data)
                        memcpy(data, vptr, bytes);
                clflush_cache_range(vptr, bytes);
                hmm_bo_vunmap(bo);
        }

        return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
        if (!virt) {
                dev_warn(atomisp_dev,
                         "hmm_load: address is NULL\n");
                return -EINVAL;
        }
        if (!data) {
                dev_err(atomisp_dev,
                        "hmm_load: data is a NULL argument\n");
                return -EINVAL;
        }
        return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
        return load_and_flush(virt, NULL, bytes);
}

/* Write function in ISP memory management */
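/*
 * Copy @bytes from the kernel buffer @data into ISP memory at @virt.
 * An existing or freshly created vmap of the buffer object is used when
 * possible; otherwise the copy is done page by page through kmap (or
 * kmap_atomic in atomic context), flushing the CPU cache as it goes.
 */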
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
        struct hmm_buffer_object *bo;
        unsigned int idx, offset, len;
        char *src, *des;
        int ret;

        if (!virt) {
                dev_warn(atomisp_dev,
                         "hmm_store: address is NULL\n");
                return -EINVAL;
        }
        if (!data) {
                dev_err(atomisp_dev,
                        "hmm_store: data is a NULL argument\n");
                return -EINVAL;
        }

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        ret = hmm_check_bo(bo, virt);
        if (ret)
                return ret;

        if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
                void *dst = bo->vmap_addr;

                dst += (virt - bo->start);
                memcpy(dst, data, bytes);
                if (bo->status & HMM_BO_VMAPED_CACHED)
                        clflush_cache_range(dst, bytes);
        } else {
                void *vptr;

                vptr = hmm_bo_vmap(bo, true);
                if (vptr) {
                        vptr = vptr + (virt - bo->start);

                        memcpy(vptr, data, bytes);
                        clflush_cache_range(vptr, bytes);
                        hmm_bo_vunmap(bo);
                        return 0;
                }
        }

        src = (char *)data;
        while (bytes) {
                idx = (virt - bo->start) >> PAGE_SHIFT;
                offset = (virt - bo->start) - (idx << PAGE_SHIFT);

                if (in_atomic())
                        des = (char *)kmap_atomic(bo->page_obj[idx].page);
                else
                        des = (char *)kmap(bo->page_obj[idx].page);

                if (!des) {
                        dev_err(atomisp_dev,
                                "kmap buffer object page failed: pg_idx = %d\n",
                                idx);
                        return -EINVAL;
                }

                des += offset;

                if ((bytes + offset) >= PAGE_SIZE) {
                        len = PAGE_SIZE - offset;
                        bytes -= len;
                } else {
                        len = bytes;
                        bytes = 0;
                }

                virt += len;

                memcpy(des, src, len);

                src += len;

                clflush_cache_range(des, len);

                if (in_atomic())
                        /*
                         * Note: kunmap_atomic requires return addr from
                         * kmap_atomic, not the page. See linux/highmem.h
                         */
                        kunmap_atomic(des - offset);
                else
                        kunmap(bo->page_obj[idx].page);
        }

        return 0;
}

/* memset function in ISP memory management */
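/*
 * Fill @bytes of ISP memory at @virt with the byte value @c, using the
 * same vmap-first, kmap-per-page-fallback strategy as hmm_store(), and
 * flush the CPU cache for the written range.
 */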
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
        struct hmm_buffer_object *bo;
        unsigned int idx, offset, len;
        char *des;
        int ret;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        ret = hmm_check_bo(bo, virt);
        if (ret)
                return ret;

        if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
                void *dst = bo->vmap_addr;

                dst += (virt - bo->start);
                memset(dst, c, bytes);

                if (bo->status & HMM_BO_VMAPED_CACHED)
                        clflush_cache_range(dst, bytes);
        } else {
                void *vptr;

                vptr = hmm_bo_vmap(bo, true);
                if (vptr) {
                        vptr = vptr + (virt - bo->start);
                        memset(vptr, c, bytes);
                        clflush_cache_range(vptr, bytes);
                        hmm_bo_vunmap(bo);
                        return 0;
                }
        }

        while (bytes) {
                idx = (virt - bo->start) >> PAGE_SHIFT;
                offset = (virt - bo->start) - (idx << PAGE_SHIFT);

                des = (char *)kmap(bo->page_obj[idx].page) + offset;

                if ((bytes + offset) >= PAGE_SIZE) {
                        len = PAGE_SIZE - offset;
                        bytes -= len;
                } else {
                        len = bytes;
                        bytes = 0;
                }

                virt += len;

                memset(des, c, len);

                clflush_cache_range(des, len);

                kunmap(bo->page_obj[idx].page);
        }

        return 0;
}

/* Convert an ISP virtual address to a physical address */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
        unsigned int idx, offset;
        struct hmm_buffer_object *bo;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        if (!bo) {
                dev_err(atomisp_dev,
                        "can not find buffer object contains address 0x%x\n",
                        virt);
                return -1;
        }

        idx = (virt - bo->start) >> PAGE_SHIFT;
        offset = (virt - bo->start) - (idx << PAGE_SHIFT);

        return page_to_phys(bo->page_obj[idx].page) + offset;
}

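/*
 * Map the buffer object that starts at ISP address @virt into the user
 * address space described by @vma; the actual work is delegated to
 * hmm_bo_mmap().
 */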
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
        struct hmm_buffer_object *bo;

        bo = hmm_bo_device_search_start(&bo_device, virt);
        if (!bo) {
                dev_err(atomisp_dev,
                        "can not find buffer object start with address 0x%x\n",
                        virt);
                return -EINVAL;
        }

        return hmm_bo_mmap(vma, bo);
}

/* Map ISP virtual address into IA virtual address */
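/*
 * Returns a kernel virtual pointer to the byte corresponding to the ISP
 * address @virt, or NULL when no buffer object covers it; @cached is
 * passed through to hmm_bo_vmap() to select a cached mapping. The
 * mapping is torn down again via hmm_vunmap().
 */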
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
        struct hmm_buffer_object *bo;
        void *ptr;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        if (!bo) {
                dev_err(atomisp_dev,
                        "can not find buffer object contains address 0x%x\n",
                        virt);
                return NULL;
        }

        ptr = hmm_bo_vmap(bo, cached);
        if (ptr)
                return ptr + (virt - bo->start);

        return NULL;
}

/* Flush the memory which is mapped as cached memory through hmm_vmap */
void hmm_flush_vmap(ia_css_ptr virt)
{
        struct hmm_buffer_object *bo;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        if (!bo) {
                dev_warn(atomisp_dev,
                         "can not find buffer object contains address 0x%x\n",
                         virt);
                return;
        }

        hmm_bo_flush_vmap(bo);
}

void hmm_vunmap(ia_css_ptr virt)
{
        struct hmm_buffer_object *bo;

        bo = hmm_bo_device_search_in_range(&bo_device, virt);
        if (!bo) {
                dev_warn(atomisp_dev,
                         "can not find buffer object contains address 0x%x\n",
                         virt);
                return;
        }

        hmm_bo_vunmap(bo);
}

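/*
 * Page-pool registration is currently compiled out (see the "#if 0"
 * below): only the "normal" allocation path is used, so registering a
 * reserved or dynamic pool is a no-op that always succeeds, and
 * unregistering does nothing.
 */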
int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type)
{
#if 0	// Just use the "normal" pool
        switch (pool_type) {
        case HMM_POOL_TYPE_RESERVED:
                reserved_pool.pops = &reserved_pops;
                return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
                                                     pool_size);
        case HMM_POOL_TYPE_DYNAMIC:
                dynamic_pool.pops = &dynamic_pops;
                return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
                                                    pool_size);
        default:
                dev_err(atomisp_dev, "invalid pool type.\n");
                return -EINVAL;
        }
#else
        return 0;
#endif
}

void hmm_pool_unregister(enum hmm_pool_type pool_type)
{
#if 0	// Just use the "normal" pool
        switch (pool_type) {
        case HMM_POOL_TYPE_RESERVED:
                if (reserved_pool.pops && reserved_pool.pops->pool_exit)
                        reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
                break;
        case HMM_POOL_TYPE_DYNAMIC:
                if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
                        dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
                break;
        default:
                dev_err(atomisp_dev, "invalid pool type.\n");
                break;
        }
#endif
}

void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
{
        return hmm_vmap(ptr, cached);
        /* vmunmap will be done in hmm_bo_release() */
}

ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
{
        struct hmm_buffer_object *bo;

        bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
        if (bo)
                return bo->start;

        dev_err(atomisp_dev,
                "can not find buffer object whose kernel virtual address is %p\n",
                ptr);
        return 0;
}

void hmm_show_mem_stat(const char *func, const int line)
{
        pr_info("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
                hmm_mem_stat.tol_cnt,
                hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
                hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
                hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
}

void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
{
        hmm_mem_stat.res_size = res_pgnr;
        /* If reserved mem pool is not enabled, set its "mem stat" values as -1. */
        if (hmm_mem_stat.res_size == 0) {
                hmm_mem_stat.res_size = -1;
                hmm_mem_stat.res_cnt = -1;
        }

        /* If dynamic memory pool is not enabled, set its "mem stat" values as -1. */
        if (!dyc_en) {
                hmm_mem_stat.dyc_size = -1;
                hmm_mem_stat.dyc_thr = -1;
        } else {
                hmm_mem_stat.dyc_size = 0;
                hmm_mem_stat.dyc_thr = dyc_pgnr;
        }
        hmm_mem_stat.usr_size = 0;
        hmm_mem_stat.sys_size = 0;
        hmm_mem_stat.tol_cnt = 0;
}