xref: /linux/drivers/staging/media/atomisp/pci/hmm/hmm.c (revision 0be3ff0c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Support for Medfield PNW Camera Imaging ISP subsystem.
4  *
5  * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
6  *
7  * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License version
11  * 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  *
19  */
20 /*
21  * This file contains the entry functions for memory management in the ISP driver
22  */
23 #include <linux/kernel.h>
24 #include <linux/types.h>
25 #include <linux/mm.h>
26 #include <linux/highmem.h>	/* for kmap */
27 #include <linux/io.h>		/* for page_to_phys */
28 #include <linux/sysfs.h>
29 
30 #include "hmm/hmm.h"
31 #include "hmm/hmm_pool.h"
32 #include "hmm/hmm_bo.h"
33 
34 #include "atomisp_internal.h"
35 #include "asm/cacheflush.h"
36 #include "mmu/isp_mmu.h"
37 #include "mmu/sh_mmu_mrfld.h"
38 
39 struct hmm_bo_device bo_device;
40 struct hmm_pool	dynamic_pool;
41 struct hmm_pool	reserved_pool;
42 static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
43 static bool hmm_initialized;
44 struct _hmm_mem_stat hmm_mem_stat;
45 
46 /*
47  * p: private
48  * s: shared
49  * u: user
50  * i: ion
51  */
52 static const char hmm_bo_type_string[] = "psui";
53 
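/*
 * bo_show() - helper for the sysfs attributes below: prints one "type pgnr"
 * line for every buffer object on @bo_list whose HMM_BO_ALLOCED state
 * matches @active, followed by per-type object counts and totals in KB.
 */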
54 static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
55 		       char *buf, struct list_head *bo_list, bool active)
56 {
57 	ssize_t ret = 0;
58 	struct hmm_buffer_object *bo;
59 	unsigned long flags;
60 	int i;
61 	long total[HMM_BO_LAST] = { 0 };
62 	long count[HMM_BO_LAST] = { 0 };
63 	int index1 = 0;
64 	int index2 = 0;
65 
66 	ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
67 	if (ret <= 0)
68 		return 0;
69 
70 	index1 += ret;
71 
72 	spin_lock_irqsave(&bo_device.list_lock, flags);
73 	list_for_each_entry(bo, bo_list, list) {
74 		if ((active && (bo->status & HMM_BO_ALLOCED)) ||
75 		    (!active && !(bo->status & HMM_BO_ALLOCED))) {
76 			ret = scnprintf(buf + index1, PAGE_SIZE - index1,
77 					"%c %d\n",
78 					hmm_bo_type_string[bo->type], bo->pgnr);
79 
80 			total[bo->type] += bo->pgnr;
81 			count[bo->type]++;
82 			if (ret > 0)
83 				index1 += ret;
84 		}
85 	}
86 	spin_unlock_irqrestore(&bo_device.list_lock, flags);
87 
88 	for (i = 0; i < HMM_BO_LAST; i++) {
89 		if (count[i]) {
90 			ret = scnprintf(buf + index1 + index2,
91 					PAGE_SIZE - index1 - index2,
92 					"%ld %c buffer objects: %ld KB\n",
93 					count[i], hmm_bo_type_string[i],
94 					total[i] * 4);
95 			if (ret > 0)
96 				index2 += ret;
97 		}
98 	}
99 
100 	/* Add trailing zero, not included by scnprintf */
101 	return index1 + index2 + 1;
102 }
103 
104 static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
105 			      char *buf)
106 {
107 	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
108 }
109 
110 static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
111 			    char *buf)
112 {
113 	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
114 }
115 
116 static ssize_t reserved_pool_show(struct device *dev,
117 				  struct device_attribute *attr,
118 				  char *buf)
119 {
120 	ssize_t ret = 0;
121 
122 	struct hmm_reserved_pool_info *pinfo = reserved_pool.pool_info;
123 	unsigned long flags;
124 
125 	if (!pinfo || !pinfo->initialized)
126 		return 0;
127 
128 	spin_lock_irqsave(&pinfo->list_lock, flags);
129 	ret = scnprintf(buf, PAGE_SIZE, "%d out of %d pages available\n",
130 			pinfo->index, pinfo->pgnr);
131 	spin_unlock_irqrestore(&pinfo->list_lock, flags);
132 
133 	if (ret > 0)
134 		ret++; /* Add trailing zero, not included by scnprintf */
135 
136 	return ret;
137 };
138 
139 static ssize_t dynamic_pool_show(struct device *dev,
140 				 struct device_attribute *attr,
141 				 char *buf)
142 {
143 	ssize_t ret = 0;
144 
145 	struct hmm_dynamic_pool_info *pinfo = dynamic_pool.pool_info;
146 	unsigned long flags;
147 
148 	if (!pinfo || !pinfo->initialized)
149 		return 0;
150 
151 	spin_lock_irqsave(&pinfo->list_lock, flags);
152 	ret = scnprintf(buf, PAGE_SIZE, "%d (max %d) pages available\n",
153 			pinfo->pgnr, pinfo->pool_size);
154 	spin_unlock_irqrestore(&pinfo->list_lock, flags);
155 
156 	if (ret > 0)
157 		ret++; /* Add trailing zero, not included by scnprintf */
158 
159 	return ret;
160 };
161 
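/* Read-only sysfs attributes, backed by the *_show() helpers above */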
162 static DEVICE_ATTR_RO(active_bo);
163 static DEVICE_ATTR_RO(free_bo);
164 static DEVICE_ATTR_RO(reserved_pool);
165 static DEVICE_ATTR_RO(dynamic_pool);
166 
167 static struct attribute *sysfs_attrs_ctrl[] = {
168 	&dev_attr_active_bo.attr,
169 	&dev_attr_free_bo.attr,
170 	&dev_attr_reserved_pool.attr,
171 	&dev_attr_dynamic_pool.attr,
172 	NULL
173 };
174 
175 static struct attribute_group atomisp_attribute_group[] = {
176 	{.attrs = sysfs_attrs_ctrl },
177 };
178 
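/*
 * hmm_init() - initialize the bo_device covering the ISP virtual address
 * range [ISP_VM_START, ISP_VM_START + ISP_VM_SIZE), reserve ISP address 0
 * with a dummy allocation and create the sysfs statistics attributes.
 */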
179 int hmm_init(void)
180 {
181 	int ret;
182 
183 	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
184 				 ISP_VM_START, ISP_VM_SIZE);
185 	if (ret)
186 		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");
187 
188 	hmm_initialized = true;
189 
190 	/*
191 	 * hmm uses NULL (0) to indicate an invalid ISP virtual address,
192 	 * but ISP_VM_START is defined as 0 as well, so a valid allocation
193 	 * could also start at 0. Allocate one piece of dummy memory at the
194 	 * very beginning so that it occupies address 0 and no later
195 	 * hmm_alloc() call can return 0.
196 	 */
197 	dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, 0);
198 
199 	if (!ret) {
200 		ret = sysfs_create_group(&atomisp_dev->kobj,
201 					 atomisp_attribute_group);
202 		if (ret)
203 			dev_err(atomisp_dev,
204 				"%s Failed to create sysfs\n", __func__);
205 	}
206 
207 	return ret;
208 }
209 
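/*
 * hmm_cleanup() - remove the sysfs attributes, free the dummy buffer that
 * reserves ISP address 0 and tear down the bo_device.  Does nothing if
 * hmm_init() has not run (dummy_ptr still equals mmgr_EXCEPTION).
 */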
210 void hmm_cleanup(void)
211 {
212 	if (dummy_ptr == mmgr_EXCEPTION)
213 		return;
214 	sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);
215 
216 	/* free dummy memory first */
217 	hmm_free(dummy_ptr);
218 	dummy_ptr = 0;
219 
220 	hmm_bo_device_exit(&bo_device);
221 	hmm_initialized = false;
222 }
223 
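/*
 * hmm_alloc() - allocate an ISP buffer of @bytes bytes: round the size up to
 * whole pages, allocate the pages (optionally from high memory or wrapping a
 * user pointer), bind them into the ISP virtual address space and return the
 * ISP virtual address, or 0 on failure.  @attrs may request a cached mapping
 * (ATOMISP_MAP_FLAG_CACHED) and/or zeroing (ATOMISP_MAP_FLAG_CLEARED).
 */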
224 ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
225 		     int from_highmem, const void __user *userptr,
226 		     const uint16_t attrs)
227 {
228 	unsigned int pgnr;
229 	struct hmm_buffer_object *bo;
230 	bool cached = attrs & ATOMISP_MAP_FLAG_CACHED;
231 	int ret;
232 
233 	WARN_ON(attrs & ATOMISP_MAP_FLAG_CONTIGUOUS);
234 
235 	/*
236 	 * Check if we are initialized. In an ideal world we wouldn't need
237 	 * this, but we can tackle it once the driver is a lot cleaner.
238 	 */
239 
240 	if (!hmm_initialized)
241 		hmm_init();
242 	/* Get page number from size */
243 	pgnr = size_to_pgnr_ceil(bytes);
244 
245 	/* Buffer object structure init */
246 	bo = hmm_bo_alloc(&bo_device, pgnr);
247 	if (!bo) {
248 		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
249 		goto create_bo_err;
250 	}
251 
252 	/* Allocate pages for memory */
253 	ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
254 	if (ret) {
255 		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
256 		goto alloc_page_err;
257 	}
258 
259 	/* Combine the virtual address and pages together */
260 	ret = hmm_bo_bind(bo);
261 	if (ret) {
262 		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
263 		goto bind_err;
264 	}
265 
266 	hmm_mem_stat.tol_cnt += pgnr;
267 
268 	if (attrs & ATOMISP_MAP_FLAG_CLEARED)
269 		hmm_set(bo->start, 0, bytes);
270 
271 	dev_dbg(atomisp_dev,
272 		"%s: pages: 0x%08x (%zu bytes), type: %d from highmem %d, user ptr %p, cached %d\n",
273 		__func__, bo->start, bytes, type, from_highmem, userptr, cached);
274 
275 	return bo->start;
276 
277 bind_err:
278 	hmm_bo_free_pages(bo);
279 alloc_page_err:
280 	hmm_bo_unref(bo);
281 create_bo_err:
282 	return 0;
283 }
284 
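/*
 * hmm_free() - counterpart of hmm_alloc(): look up the buffer object that
 * starts at @virt, unbind it from the ISP address space and free its pages.
 */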
285 void hmm_free(ia_css_ptr virt)
286 {
287 	struct hmm_buffer_object *bo;
288 
289 	dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt);
290 
291 	if (WARN_ON(virt == mmgr_EXCEPTION))
292 		return;
293 
294 	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
295 
296 	if (!bo) {
297 		dev_err(atomisp_dev,
298 			"cannot find buffer object starting at address 0x%x\n",
299 			(unsigned int)virt);
300 		return;
301 	}
302 
303 	hmm_mem_stat.tol_cnt -= bo->pgnr;
304 
305 	hmm_bo_unbind(bo);
306 	hmm_bo_free_pages(bo);
307 	hmm_bo_unref(bo);
308 }
309 
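/*
 * hmm_check_bo() - verify that a buffer object was found for @ptr and that it
 * has both pages and ISP virtual address space allocated.
 */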
310 static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
311 {
312 	if (!bo) {
313 		dev_err(atomisp_dev,
314 			"cannot find buffer object containing address 0x%x\n",
315 			ptr);
316 		return -EINVAL;
317 	}
318 
319 	if (!hmm_bo_page_allocated(bo)) {
320 		dev_err(atomisp_dev,
321 			"buffer object has no pages allocated.\n");
322 		return -EINVAL;
323 	}
324 
325 	if (!hmm_bo_allocated(bo)) {
326 		dev_err(atomisp_dev,
327 			"buffer object has no virtual address space allocated.\n");
328 		return -EINVAL;
329 	}
330 
331 	return 0;
332 }
333 
334 /* Read function in ISP memory management */
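/*
 * Fallback used when the buffer cannot be vmap()'ed: map one page at a time
 * with kmap(), copy it out when @data is not NULL and flush it from the cache.
 */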
335 static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
336 				  unsigned int bytes)
337 {
338 	struct hmm_buffer_object *bo;
339 	unsigned int idx, offset, len;
340 	char *src, *des;
341 	int ret;
342 
343 	bo = hmm_bo_device_search_in_range(&bo_device, virt);
344 	ret = hmm_check_bo(bo, virt);
345 	if (ret)
346 		return ret;
347 
348 	des = (char *)data;
349 	while (bytes) {
350 		idx = (virt - bo->start) >> PAGE_SHIFT;
351 		offset = (virt - bo->start) - (idx << PAGE_SHIFT);
352 
353 		src = (char *)kmap(bo->page_obj[idx].page) + offset;
354 
355 		if ((bytes + offset) >= PAGE_SIZE) {
356 			len = PAGE_SIZE - offset;
357 			bytes -= len;
358 		} else {
359 			len = bytes;
360 			bytes = 0;
361 		}
362 
363 		virt += len;	/* update virt for next loop */
364 
365 		if (des) {
366 			memcpy(des, src, len);
367 			des += len;
368 		}
369 
370 		clflush_cache_range(src, len);
371 
372 		kunmap(bo->page_obj[idx].page);
373 	}
374 
375 	return 0;
376 }
377 
378 /* Read function in ISP memory management */
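/*
 * Use the buffer's vmap()'ed mapping when available (or create a temporary
 * one), falling back to load_and_flush_by_kmap() when vmap fails.
 * hmm_flush() calls this with a NULL @data to flush without copying.
 */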
379 static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
380 {
381 	struct hmm_buffer_object *bo;
382 	int ret;
383 
384 	bo = hmm_bo_device_search_in_range(&bo_device, virt);
385 	ret = hmm_check_bo(bo, virt);
386 	if (ret)
387 		return ret;
388 
389 	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
390 		void *src = bo->vmap_addr;
391 
392 		src += (virt - bo->start);
393 		memcpy(data, src, bytes);
394 		if (bo->status & HMM_BO_VMAPED_CACHED)
395 			clflush_cache_range(src, bytes);
396 	} else {
397 		void *vptr;
398 
399 		vptr = hmm_bo_vmap(bo, true);
400 		if (!vptr)
401 			return load_and_flush_by_kmap(virt, data, bytes);
402 		else
403 			vptr = vptr + (virt - bo->start);
404 
405 		memcpy(data, vptr, bytes);
406 		clflush_cache_range(vptr, bytes);
407 		hmm_bo_vunmap(bo);
408 	}
409 
410 	return 0;
411 }
412 
413 /* Read function in ISP memory management */
414 int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
415 {
416 	if (!virt) {
417 		dev_warn(atomisp_dev,
418 			"hmm_load: address is NULL\n");
419 		return -EINVAL;
420 	}
421 	if (!data) {
422 		dev_err(atomisp_dev,
423 			"hmm_load: data is a NULL argument\n");
424 		return -EINVAL;
425 	}
426 	return load_and_flush(virt, data, bytes);
427 }
428 
429 /* Flush hmm data from the data cache */
430 int hmm_flush(ia_css_ptr virt, unsigned int bytes)
431 {
432 	return load_and_flush(virt, NULL, bytes);
433 }
434 
435 /* Write function in ISP memory management */
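/*
 * Prefer the buffer's vmap()'ed mapping when it exists or can be created;
 * otherwise copy page by page with kmap()/kmap_atomic() and flush each chunk.
 */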
436 int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
437 {
438 	struct hmm_buffer_object *bo;
439 	unsigned int idx, offset, len;
440 	char *src, *des;
441 	int ret;
442 
443 	if (!virt) {
444 		dev_warn(atomisp_dev,
445 			"hmm_store: address is NULL\n");
446 		return -EINVAL;
447 	}
448 	if (!data) {
449 		dev_err(atomisp_dev,
450 			"hmm_store: data is a NULL argument\n");
451 		return -EINVAL;
452 	}
453 
454 	bo = hmm_bo_device_search_in_range(&bo_device, virt);
455 	ret = hmm_check_bo(bo, virt);
456 	if (ret)
457 		return ret;
458 
459 	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
460 		void *dst = bo->vmap_addr;
461 
462 		dst += (virt - bo->start);
463 		memcpy(dst, data, bytes);
464 		if (bo->status & HMM_BO_VMAPED_CACHED)
465 			clflush_cache_range(dst, bytes);
466 	} else {
467 		void *vptr;
468 
469 		vptr = hmm_bo_vmap(bo, true);
470 		if (vptr) {
471 			vptr = vptr + (virt - bo->start);
472 
473 			memcpy(vptr, data, bytes);
474 			clflush_cache_range(vptr, bytes);
475 			hmm_bo_vunmap(bo);
476 			return 0;
477 		}
478 	}
479 
480 	src = (char *)data;
481 	while (bytes) {
482 		idx = (virt - bo->start) >> PAGE_SHIFT;
483 		offset = (virt - bo->start) - (idx << PAGE_SHIFT);
484 
485 		if (in_atomic())
486 			des = (char *)kmap_atomic(bo->page_obj[idx].page);
487 		else
488 			des = (char *)kmap(bo->page_obj[idx].page);
489 
490 		if (!des) {
491 			dev_err(atomisp_dev,
492 				"kmap buffer object page failed: pg_idx = %d\n",
493 				idx);
494 			return -EINVAL;
495 		}
496 
497 		des += offset;
498 
499 		if ((bytes + offset) >= PAGE_SIZE) {
500 			len = PAGE_SIZE - offset;
501 			bytes -= len;
502 		} else {
503 			len = bytes;
504 			bytes = 0;
505 		}
506 
507 		virt += len;
508 
509 		memcpy(des, src, len);
510 
511 		src += len;
512 
513 		clflush_cache_range(des, len);
514 
515 		if (in_atomic())
516 			/*
517 			 * Note: kunmap_atomic requires return addr from
518 			 * kmap_atomic, not the page. See linux/highmem.h
519 			 */
520 			kunmap_atomic(des - offset);
521 		else
522 			kunmap(bo->page_obj[idx].page);
523 	}
524 
525 	return 0;
526 }
527 
528 /* memset function in ISP memory management */
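/*
 * Like hmm_store(), but fills the range with the byte value @c instead of
 * copying from a kernel buffer.
 */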
529 int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
530 {
531 	struct hmm_buffer_object *bo;
532 	unsigned int idx, offset, len;
533 	char *des;
534 	int ret;
535 
536 	bo = hmm_bo_device_search_in_range(&bo_device, virt);
537 	ret = hmm_check_bo(bo, virt);
538 	if (ret)
539 		return ret;
540 
541 	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
542 		void *dst = bo->vmap_addr;
543 
544 		dst += (virt - bo->start);
545 		memset(dst, c, bytes);
546 
547 		if (bo->status & HMM_BO_VMAPED_CACHED)
548 			clflush_cache_range(dst, bytes);
549 	} else {
550 		void *vptr;
551 
552 		vptr = hmm_bo_vmap(bo, true);
553 		if (vptr) {
554 			vptr = vptr + (virt - bo->start);
555 			memset(vptr, c, bytes);
556 			clflush_cache_range(vptr, bytes);
557 			hmm_bo_vunmap(bo);
558 			return 0;
559 		}
560 	}
561 
562 	while (bytes) {
563 		idx = (virt - bo->start) >> PAGE_SHIFT;
564 		offset = (virt - bo->start) - (idx << PAGE_SHIFT);
565 
566 		des = (char *)kmap(bo->page_obj[idx].page) + offset;
567 
568 		if ((bytes + offset) >= PAGE_SIZE) {
569 			len = PAGE_SIZE - offset;
570 			bytes -= len;
571 		} else {
572 			len = bytes;
573 			bytes = 0;
574 		}
575 
576 		virt += len;
577 
578 		memset(des, c, len);
579 
580 		clflush_cache_range(des, len);
581 
582 		kunmap(bo->page_obj[idx].page);
583 	}
584 
585 	return 0;
586 }
587 
588 /* Convert an ISP virtual address to a physical address */
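/* Returns -1 (cast to phys_addr_t) when no buffer object covers @virt. */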
589 phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
590 {
591 	unsigned int idx, offset;
592 	struct hmm_buffer_object *bo;
593 
594 	bo = hmm_bo_device_search_in_range(&bo_device, virt);
595 	if (!bo) {
596 		dev_err(atomisp_dev,
597 			"cannot find buffer object containing address 0x%x\n",
598 			virt);
599 		return -1;
600 	}
601 
602 	idx = (virt - bo->start) >> PAGE_SHIFT;
603 	offset = (virt - bo->start) - (idx << PAGE_SHIFT);
604 
605 	return page_to_phys(bo->page_obj[idx].page) + offset;
606 }
607 
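/*
 * hmm_mmap() - map the buffer object starting at ISP address @virt into the
 * userspace VMA described by @vma.
 */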
608 int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
609 {
610 	struct hmm_buffer_object *bo;
611 
612 	bo = hmm_bo_device_search_start(&bo_device, virt);
613 	if (!bo) {
614 		dev_err(atomisp_dev,
615 			"cannot find buffer object starting at address 0x%x\n",
616 			virt);
617 		return -EINVAL;
618 	}
619 
620 	return hmm_bo_mmap(vma, bo);
621 }
622 
623 /* Map ISP virtual address into IA virtual address */
624 void *hmm_vmap(ia_css_ptr virt, bool cached)
625 {
626 	struct hmm_buffer_object *bo;
627 	void *ptr;
628 
629 	bo = hmm_bo_device_search_in_range(&bo_device, virt);
630 	if (!bo) {
631 		dev_err(atomisp_dev,
632 			"cannot find buffer object containing address 0x%x\n",
633 			virt);
634 		return NULL;
635 	}
636 
637 	ptr = hmm_bo_vmap(bo, cached);
638 	if (ptr)
639 		return ptr + (virt - bo->start);
640 	else
641 		return NULL;
642 }
643 
644 /* Flush the memory which is mapped as cached memory through hmm_vmap */
645 void hmm_flush_vmap(ia_css_ptr virt)
646 {
647 	struct hmm_buffer_object *bo;
648 
649 	bo = hmm_bo_device_search_in_range(&bo_device, virt);
650 	if (!bo) {
651 		dev_warn(atomisp_dev,
652 			 "cannot find buffer object containing address 0x%x\n",
653 			 virt);
654 		return;
655 	}
656 
657 	hmm_bo_flush_vmap(bo);
658 }
659 
660 void hmm_vunmap(ia_css_ptr virt)
661 {
662 	struct hmm_buffer_object *bo;
663 
664 	bo = hmm_bo_device_search_in_range(&bo_device, virt);
665 	if (!bo) {
666 		dev_warn(atomisp_dev,
667 			 "cannot find buffer object containing address 0x%x\n",
668 			 virt);
669 		return;
670 	}
671 
672 	hmm_bo_vunmap(bo);
673 }
674 
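/*
 * The reserved and dynamic page pools are currently compiled out ("#if 0"
 * below), so registering and unregistering them are no-ops.
 */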
675 int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type)
676 {
677 #if 0	// Just use the "normal" pool
678 	switch (pool_type) {
679 	case HMM_POOL_TYPE_RESERVED:
680 		reserved_pool.pops = &reserved_pops;
681 		return reserved_pool.pops->pool_init(&reserved_pool.pool_info,
682 						     pool_size);
683 	case HMM_POOL_TYPE_DYNAMIC:
684 		dynamic_pool.pops = &dynamic_pops;
685 		return dynamic_pool.pops->pool_init(&dynamic_pool.pool_info,
686 						    pool_size);
687 	default:
688 		dev_err(atomisp_dev, "invalid pool type.\n");
689 		return -EINVAL;
690 	}
691 #else
692 	return 0;
693 #endif
694 }
695 
696 void hmm_pool_unregister(enum hmm_pool_type pool_type)
697 {
698 #if 0	// Just use the "normal" pool
699 	switch (pool_type) {
700 	case HMM_POOL_TYPE_RESERVED:
701 		if (reserved_pool.pops && reserved_pool.pops->pool_exit)
702 			reserved_pool.pops->pool_exit(&reserved_pool.pool_info);
703 		break;
704 	case HMM_POOL_TYPE_DYNAMIC:
705 		if (dynamic_pool.pops && dynamic_pool.pops->pool_exit)
706 			dynamic_pool.pops->pool_exit(&dynamic_pool.pool_info);
707 		break;
708 	default:
709 		dev_err(atomisp_dev, "invalid pool type.\n");
710 		break;
711 	}
712 #endif
713 
714 	return;
715 }
716 
717 void *hmm_isp_vaddr_to_host_vaddr(ia_css_ptr ptr, bool cached)
718 {
719 	return hmm_vmap(ptr, cached);
720 	/* vmunmap will be done in hmm_bo_release() */
721 }
722 
723 ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
724 {
725 	struct hmm_buffer_object *bo;
726 
727 	bo = hmm_bo_device_search_vmap_start(&bo_device, ptr);
728 	if (bo)
729 		return bo->start;
730 
731 	dev_err(atomisp_dev,
732 		"cannot find buffer object whose kernel virtual address is %p\n",
733 		ptr);
734 	return 0;
735 }
736 
737 void hmm_show_mem_stat(const char *func, const int line)
738 {
739 	pr_info("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d\n",
740 		hmm_mem_stat.tol_cnt,
741 		hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
742 		hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
743 		hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
744 }
745 
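/*
 * hmm_init_mem_stat() - reset the statistics counters; pools that are not
 * enabled have their fields set to -1.
 */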
746 void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
747 {
748 	hmm_mem_stat.res_size = res_pgnr;
749 	/* If reserved mem pool is not enabled, set its "mem stat" values as -1. */
750 	if (hmm_mem_stat.res_size == 0) {
751 		hmm_mem_stat.res_size = -1;
752 		hmm_mem_stat.res_cnt = -1;
753 	}
754 
755 	/* If dynamic memory pool is not enabled, set its "mem stat" values as -1. */
756 	if (!dyc_en) {
757 		hmm_mem_stat.dyc_size = -1;
758 		hmm_mem_stat.dyc_thr = -1;
759 	} else {
760 		hmm_mem_stat.dyc_size = 0;
761 		hmm_mem_stat.dyc_thr = dyc_pgnr;
762 	}
763 	hmm_mem_stat.usr_size = 0;
764 	hmm_mem_stat.sys_size = 0;
765 	hmm_mem_stat.tol_cnt = 0;
766 }
767