1 // SPDX-License-Identifier: GPL-2.0
2 /*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
4  *
5  * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
6  *
7  * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License version
11  * 2 as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  *
19  */
20 /*
21  * This file contains functions for buffer object structure management
22  */
23 #include <linux/kernel.h>
24 #include <linux/types.h>
25 #include <linux/gfp.h>		/* for GFP_ATOMIC */
26 #include <linux/mm.h>
27 #include <linux/mm_types.h>
28 #include <linux/hugetlb.h>
29 #include <linux/highmem.h>
30 #include <linux/slab.h>		/* for kmalloc */
31 #include <linux/module.h>
32 #include <linux/moduleparam.h>
33 #include <linux/string.h>
34 #include <linux/list.h>
35 #include <linux/errno.h>
36 #include <linux/io.h>
37 #include <asm/current.h>
38 #include <linux/sched/signal.h>
39 #include <linux/file.h>
40 
41 #include <asm/set_memory.h>
42 
43 #include "atomisp_internal.h"
44 #include "hmm/hmm_common.h"
45 #include "hmm/hmm_bo.h"
46 
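/*
 * Initialise a freshly allocated buffer object: zero it, set up its mutex
 * and list head, and describe a free range of 'pgnr' pages starting at the
 * device's ISP virtual address base (callers such as __bo_break_up adjust
 * start/end afterwards).
 */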
47 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
48 		     unsigned int pgnr)
49 {
50 	check_bodev_null_return(bdev, -EINVAL);
51 	var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
52 			 "hmm_bo_device not inited yet.\n");
53 	/* prevent zero size buffer object */
54 	if (pgnr == 0) {
55 		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
56 		return -EINVAL;
57 	}
58 
59 	memset(bo, 0, sizeof(*bo));
60 	mutex_init(&bo->mutex);
61 
62 	/* init the bo->list HEAD as an element of entire_bo_list */
63 	INIT_LIST_HEAD(&bo->list);
64 
65 	bo->bdev = bdev;
66 	bo->vmap_addr = NULL;
67 	bo->status = HMM_BO_FREE;
68 	bo->start = bdev->start;
69 	bo->pgnr = pgnr;
70 	bo->end = bo->start + pgnr_to_size(pgnr);
71 	bo->prev = NULL;
72 	bo->next = NULL;
73 
74 	return 0;
75 }
76 
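/*
 * Find the best-fit free bo (the smallest pgnr >= the requested pgnr) in
 * the free rbtree and take it off.  Equal-sized free bos are chained off a
 * tree node via their next/prev pointers; if such a chain exists, one of
 * the chained bos is returned instead so the rbtree needs no rebalancing.
 */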
77 static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
78     struct rb_node *node, unsigned int pgnr)
79 {
80 	struct hmm_buffer_object *this, *ret_bo, *temp_bo;
81 
82 	this = rb_entry(node, struct hmm_buffer_object, node);
83 	if (this->pgnr == pgnr ||
84 	    (this->pgnr > pgnr && !this->node.rb_left)) {
85 		goto remove_bo_and_return;
86 	} else {
87 		if (this->pgnr < pgnr) {
88 			if (!this->node.rb_right)
89 				return NULL;
90 			ret_bo = __bo_search_and_remove_from_free_rbtree(
91 				     this->node.rb_right, pgnr);
92 		} else {
93 			ret_bo = __bo_search_and_remove_from_free_rbtree(
94 				     this->node.rb_left, pgnr);
95 		}
96 		if (!ret_bo) {
97 			if (this->pgnr > pgnr)
98 				goto remove_bo_and_return;
99 			else
100 				return NULL;
101 		}
102 		return ret_bo;
103 	}
104 
105 remove_bo_and_return:
106 	/* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
107 	 * 1. check if 'this->next' is NULL:
108 	 *	yes: erase 'this' node and rebalance rbtree, return 'this'.
109 	 */
110 	if (!this->next) {
111 		rb_erase(&this->node, &this->bdev->free_rbtree);
112 		return this;
113 	}
114 	/* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
115 	 * 2. check if 'this->next->next' is NULL:
116 	 *	yes: change the related 'next/prev' pointer,
117 	 *		return 'this->next' but the rbtree stays unchanged.
118 	 */
119 	temp_bo = this->next;
120 	this->next = temp_bo->next;
121 	if (temp_bo->next)
122 		temp_bo->next->prev = this;
123 	temp_bo->next = NULL;
124 	temp_bo->prev = NULL;
125 	return temp_bo;
126 }
127 
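/* Find the bo in 'root' whose start address is exactly 'start', or NULL. */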
128 static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
129 	ia_css_ptr start)
130 {
131 	struct rb_node *n = root->rb_node;
132 	struct hmm_buffer_object *bo;
133 
134 	do {
135 		bo = rb_entry(n, struct hmm_buffer_object, node);
136 
137 		if (bo->start > start) {
138 			if (!n->rb_left)
139 				return NULL;
140 			n = n->rb_left;
141 		} else if (bo->start < start) {
142 			if (!n->rb_right)
143 				return NULL;
144 			n = n->rb_right;
145 		} else {
146 			return bo;
147 		}
148 	} while (n);
149 
150 	return NULL;
151 }
152 
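/*
 * Find the bo in 'root' whose [bo->start, bo->end) range contains the
 * address 'start', or NULL if no bo covers it.
 */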
153 static struct hmm_buffer_object *__bo_search_by_addr_in_range(
154     struct rb_root *root, unsigned int start)
155 {
156 	struct rb_node *n = root->rb_node;
157 	struct hmm_buffer_object *bo;
158 
159 	do {
160 		bo = rb_entry(n, struct hmm_buffer_object, node);
161 
162 		if (bo->start > start) {
163 			if (!n->rb_left)
164 				return NULL;
165 			n = n->rb_left;
166 		} else {
167 			if (bo->end > start)
168 				return bo;
169 			if (!n->rb_right)
170 				return NULL;
171 			n = n->rb_right;
172 		}
173 	} while (n);
174 
175 	return NULL;
176 }
177 
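/*
 * Insert a bo into the free rbtree, keyed by size (pgnr).  If a node of the
 * same size already exists, the bo is chained onto that node's next/prev
 * list instead of adding a new tree node.  Marks the bo as HMM_BO_FREE.
 */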
178 static void __bo_insert_to_free_rbtree(struct rb_root *root,
179 				       struct hmm_buffer_object *bo)
180 {
181 	struct rb_node **new = &root->rb_node;
182 	struct rb_node *parent = NULL;
183 	struct hmm_buffer_object *this;
184 	unsigned int pgnr = bo->pgnr;
185 
186 	while (*new) {
187 		parent = *new;
188 		this = container_of(*new, struct hmm_buffer_object, node);
189 
190 		if (pgnr < this->pgnr) {
191 			new = &((*new)->rb_left);
192 		} else if (pgnr > this->pgnr) {
193 			new = &((*new)->rb_right);
194 		} else {
195 			bo->prev = this;
196 			bo->next = this->next;
197 			if (this->next)
198 				this->next->prev = bo;
199 			this->next = bo;
200 			bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
201 			return;
202 		}
203 	}
204 
205 	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
206 
207 	rb_link_node(&bo->node, parent, new);
208 	rb_insert_color(&bo->node, root);
209 }
210 
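/*
 * Insert a bo into the allocated rbtree, keyed by its start address.
 * Initialises the bo's refcount and marks it as HMM_BO_ALLOCED.
 */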
211 static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
212 					struct hmm_buffer_object *bo)
213 {
214 	struct rb_node **new = &root->rb_node;
215 	struct rb_node *parent = NULL;
216 	struct hmm_buffer_object *this;
217 	unsigned int start = bo->start;
218 
219 	while (*new) {
220 		parent = *new;
221 		this = container_of(*new, struct hmm_buffer_object, node);
222 
223 		if (start < this->start)
224 			new = &((*new)->rb_left);
225 		else
226 			new = &((*new)->rb_right);
227 	}
228 
229 	kref_init(&bo->kref);
230 	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
231 
232 	rb_link_node(&bo->node, parent, new);
233 	rb_insert_color(&bo->node, root);
234 }
235 
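/*
 * Split 'bo' in two: carve the first 'pgnr' pages off into a newly
 * allocated bo and shrink the original bo to the remaining tail.  The new
 * bo is inserted just before 'bo' in the entire_bo_list, keeping the list
 * ordered by address.
 */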
236 static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
237 	struct hmm_buffer_object *bo,
238 	unsigned int pgnr)
239 {
240 	struct hmm_buffer_object *new_bo;
241 	unsigned long flags;
242 	int ret;
243 
244 	new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
245 	if (!new_bo) {
		dev_err(atomisp_dev, "%s: kmem_cache_alloc failed!\n", __func__);
247 		return NULL;
248 	}
249 	ret = __bo_init(bdev, new_bo, pgnr);
250 	if (ret) {
251 		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
252 		kmem_cache_free(bdev->bo_cache, new_bo);
253 		return NULL;
254 	}
255 
256 	new_bo->start = bo->start;
257 	new_bo->end = new_bo->start + pgnr_to_size(pgnr);
258 	bo->start = new_bo->end;
259 	bo->pgnr = bo->pgnr - pgnr;
260 
261 	spin_lock_irqsave(&bdev->list_lock, flags);
262 	list_add_tail(&new_bo->list, &bo->list);
263 	spin_unlock_irqrestore(&bdev->list_lock, flags);
264 
265 	return new_bo;
266 }
267 
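/*
 * Detach a free bo from the free rbtree and/or the chain of equal-sized
 * bos it hangs on; the four possible cases are described in the comments
 * below.
 */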
268 static void __bo_take_off_handling(struct hmm_buffer_object *bo)
269 {
270 	struct hmm_bo_device *bdev = bo->bdev;
	/* There are 4 situations when we take a known bo off the free rbtree:
	 * 1. if bo->prev == NULL && bo->next == NULL, bo is a rbtree node
	 *	with no chain of equal-sized bos after it; to take it off we
	 *	just erase bo directly and rebalance the free rbtree
	 */
276 	if (!bo->prev && !bo->next) {
277 		rb_erase(&bo->node, &bdev->free_rbtree);
		/* 2. when bo->prev == NULL && bo->next != NULL, bo is a rbtree node
		 *	with a chain of equal-sized bos after it; to take it off we
		 *	erase bo first, then insert bo->next into the free rbtree and
		 *	rebalance it
		 */
283 	} else if (!bo->prev && bo->next) {
284 		bo->next->prev = NULL;
285 		rb_erase(&bo->node, &bdev->free_rbtree);
286 		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
287 		bo->next = NULL;
		/* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
		 *	node; it is the last element of the chain hanging off a rbtree
		 *	node, so we just clear its "prev/next" pointers and the free
		 *	rbtree stays unchanged
		 */
293 	} else if (bo->prev && !bo->next) {
294 		bo->prev->next = NULL;
295 		bo->prev = NULL;
		/* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
		 *	node; it is in the middle of the chain hanging off a rbtree
		 *	node, so we relink its neighbours and clear its "prev/next"
		 *	pointers; the free rbtree stays unchanged
		 */
301 	} else if (bo->prev && bo->next) {
302 		bo->next->prev = bo->prev;
303 		bo->prev->next = bo->next;
304 		bo->next = NULL;
305 		bo->prev = NULL;
306 	}
307 }
308 
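/*
 * Merge two address-adjacent bos: 'next_bo' absorbs the range of 'bo'
 * (the adjacent lower range), which is then dropped from the
 * entire_bo_list and freed.  Returns the surviving bo.
 */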
309 static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
310 	struct hmm_buffer_object *next_bo)
311 {
312 	struct hmm_bo_device *bdev;
313 	unsigned long flags;
314 
315 	bdev = bo->bdev;
316 	next_bo->start = bo->start;
317 	next_bo->pgnr = next_bo->pgnr + bo->pgnr;
318 
319 	spin_lock_irqsave(&bdev->list_lock, flags);
320 	list_del(&bo->list);
321 	spin_unlock_irqrestore(&bdev->list_lock, flags);
322 
323 	kmem_cache_free(bo->bdev->bo_cache, bo);
324 
325 	return next_bo;
326 }
327 
328 /*
329  * hmm_bo_device functions.
330  */
331 int hmm_bo_device_init(struct hmm_bo_device *bdev,
332 		       struct isp_mmu_client *mmu_driver,
333 		       unsigned int vaddr_start,
334 		       unsigned int size)
335 {
336 	struct hmm_buffer_object *bo;
337 	unsigned long flags;
338 	int ret;
339 
340 	check_bodev_null_return(bdev, -EINVAL);
341 
342 	ret = isp_mmu_init(&bdev->mmu, mmu_driver);
343 	if (ret) {
344 		dev_err(atomisp_dev, "isp_mmu_init failed.\n");
345 		return ret;
346 	}
347 
348 	bdev->start = vaddr_start;
349 	bdev->pgnr = size_to_pgnr_ceil(size);
350 	bdev->size = pgnr_to_size(bdev->pgnr);
351 
352 	spin_lock_init(&bdev->list_lock);
353 	mutex_init(&bdev->rbtree_mutex);
354 
355 	bdev->flag = HMM_BO_DEVICE_INITED;
356 
357 	INIT_LIST_HEAD(&bdev->entire_bo_list);
358 	bdev->allocated_rbtree = RB_ROOT;
359 	bdev->free_rbtree = RB_ROOT;
360 
361 	bdev->bo_cache = kmem_cache_create("bo_cache",
362 					   sizeof(struct hmm_buffer_object), 0, 0, NULL);
363 	if (!bdev->bo_cache) {
364 		dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
365 		isp_mmu_exit(&bdev->mmu);
366 		return -ENOMEM;
367 	}
368 
369 	bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
370 	if (!bo) {
		dev_err(atomisp_dev, "%s: kmem_cache_alloc failed!\n", __func__);
372 		isp_mmu_exit(&bdev->mmu);
373 		return -ENOMEM;
374 	}
375 
376 	ret = __bo_init(bdev, bo, bdev->pgnr);
377 	if (ret) {
378 		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
379 		kmem_cache_free(bdev->bo_cache, bo);
380 		isp_mmu_exit(&bdev->mmu);
381 		return -EINVAL;
382 	}
383 
384 	spin_lock_irqsave(&bdev->list_lock, flags);
385 	list_add_tail(&bo->list, &bdev->entire_bo_list);
386 	spin_unlock_irqrestore(&bdev->list_lock, flags);
387 
388 	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
389 
390 	return 0;
391 }
392 
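/*
 * Allocate a buffer object covering 'pgnr' pages of ISP virtual address
 * space: take the best-fit bo from the free rbtree, split it if it is
 * larger than requested, and move the result to the allocated rbtree.
 */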
393 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
394 				       unsigned int pgnr)
395 {
396 	struct hmm_buffer_object *bo, *new_bo;
397 	struct rb_root *root = &bdev->free_rbtree;
398 
399 	check_bodev_null_return(bdev, NULL);
400 	var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
401 			 "hmm_bo_device not inited yet.\n");
402 
403 	if (pgnr == 0) {
404 		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
405 		return NULL;
406 	}
407 
408 	mutex_lock(&bdev->rbtree_mutex);
409 	bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
410 	if (!bo) {
411 		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed\n",
			__func__);
414 		return NULL;
415 	}
416 
417 	if (bo->pgnr > pgnr) {
418 		new_bo = __bo_break_up(bdev, bo, pgnr);
419 		if (!new_bo) {
420 			mutex_unlock(&bdev->rbtree_mutex);
421 			dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
422 				__func__);
423 			return NULL;
424 		}
425 
426 		__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
427 		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
428 
429 		mutex_unlock(&bdev->rbtree_mutex);
430 		return new_bo;
431 	}
432 
433 	__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
434 
435 	mutex_unlock(&bdev->rbtree_mutex);
436 	return bo;
437 }
438 
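/*
 * Return a bo's virtual address range to the free pool, merging it with
 * address-adjacent free neighbours.  Called from kref_hmm_bo_release()
 * when the refcount drops to zero, and directly by hmm_bo_device_exit().
 */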
439 void hmm_bo_release(struct hmm_buffer_object *bo)
440 {
441 	struct hmm_bo_device *bdev = bo->bdev;
442 	struct hmm_buffer_object *next_bo, *prev_bo;
443 
444 	mutex_lock(&bdev->rbtree_mutex);
445 
446 	/*
447 	 * FIX ME:
448 	 *
449 	 * how to destroy the bo when it is stilled MMAPED?
450 	 *
451 	 * ideally, this will not happened as hmm_bo_release
452 	 * will only be called when kref reaches 0, and in mmap
453 	 * operation the hmm_bo_ref will eventually be called.
454 	 * so, if this happened, something goes wrong.
455 	 */
456 	if (bo->status & HMM_BO_MMAPED) {
457 		mutex_unlock(&bdev->rbtree_mutex);
458 		dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
459 		return;
460 	}
461 
462 	if (bo->status & HMM_BO_BINDED) {
		dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
464 		hmm_bo_unbind(bo);
465 	}
466 
467 	if (bo->status & HMM_BO_PAGE_ALLOCED) {
		dev_warn(atomisp_dev, "the pages are not freed, free the pages first\n");
469 		hmm_bo_free_pages(bo);
470 	}
471 	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		dev_warn(atomisp_dev, "vunmap has not been done, doing it now...\n");
473 		hmm_bo_vunmap(bo);
474 	}
475 
476 	rb_erase(&bo->node, &bdev->allocated_rbtree);
477 
478 	prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
479 	next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
480 
481 	if (bo->list.prev != &bdev->entire_bo_list &&
482 	    prev_bo->end == bo->start &&
483 	    (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
484 		__bo_take_off_handling(prev_bo);
485 		bo = __bo_merge(prev_bo, bo);
486 	}
487 
488 	if (bo->list.next != &bdev->entire_bo_list &&
489 	    next_bo->start == bo->end &&
490 	    (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
491 		__bo_take_off_handling(next_bo);
492 		bo = __bo_merge(bo, next_bo);
493 	}
494 
495 	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
496 
497 	mutex_unlock(&bdev->rbtree_mutex);
498 	return;
499 }
500 
501 void hmm_bo_device_exit(struct hmm_bo_device *bdev)
502 {
503 	struct hmm_buffer_object *bo;
504 	unsigned long flags;
505 
506 	dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
507 
508 	check_bodev_null_return_void(bdev);
509 
510 	/*
511 	 * release all allocated bos even they a in use
512 	 * and all bos will be merged into a big bo
513 	 */
514 	while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
515 		hmm_bo_release(
516 		    rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
517 
518 	dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
519 		__func__);
520 
521 	/* free all bos to release all ISP virtual memory */
522 	while (!list_empty(&bdev->entire_bo_list)) {
523 		bo = list_to_hmm_bo(bdev->entire_bo_list.next);
524 
525 		spin_lock_irqsave(&bdev->list_lock, flags);
526 		list_del(&bo->list);
527 		spin_unlock_irqrestore(&bdev->list_lock, flags);
528 
529 		kmem_cache_free(bdev->bo_cache, bo);
530 	}
531 
	dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);
533 
534 	kmem_cache_destroy(bdev->bo_cache);
535 
536 	isp_mmu_exit(&bdev->mmu);
537 }
538 
539 int hmm_bo_device_inited(struct hmm_bo_device *bdev)
540 {
541 	check_bodev_null_return(bdev, -EINVAL);
542 
543 	return bdev->flag == HMM_BO_DEVICE_INITED;
544 }
545 
546 int hmm_bo_allocated(struct hmm_buffer_object *bo)
547 {
548 	check_bo_null_return(bo, 0);
549 
550 	return bo->status & HMM_BO_ALLOCED;
551 }
552 
553 struct hmm_buffer_object *hmm_bo_device_search_start(
554     struct hmm_bo_device *bdev, ia_css_ptr vaddr)
555 {
556 	struct hmm_buffer_object *bo;
557 
558 	check_bodev_null_return(bdev, NULL);
559 
560 	mutex_lock(&bdev->rbtree_mutex);
561 	bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
562 	if (!bo) {
563 		mutex_unlock(&bdev->rbtree_mutex);
564 		dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
565 			__func__, vaddr);
566 		return NULL;
567 	}
568 	mutex_unlock(&bdev->rbtree_mutex);
569 
570 	return bo;
571 }
572 
573 struct hmm_buffer_object *hmm_bo_device_search_in_range(
574     struct hmm_bo_device *bdev, unsigned int vaddr)
575 {
576 	struct hmm_buffer_object *bo;
577 
578 	check_bodev_null_return(bdev, NULL);
579 
580 	mutex_lock(&bdev->rbtree_mutex);
581 	bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
582 	if (!bo) {
583 		mutex_unlock(&bdev->rbtree_mutex);
584 		dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n",
585 			__func__, vaddr);
586 		return NULL;
587 	}
588 	mutex_unlock(&bdev->rbtree_mutex);
589 
590 	return bo;
591 }
592 
593 struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
594     struct hmm_bo_device *bdev, const void *vaddr)
595 {
596 	struct list_head *pos;
597 	struct hmm_buffer_object *bo;
598 	unsigned long flags;
599 
600 	check_bodev_null_return(bdev, NULL);
601 
602 	spin_lock_irqsave(&bdev->list_lock, flags);
603 	list_for_each(pos, &bdev->entire_bo_list) {
604 		bo = list_to_hmm_bo(pos);
		/* skip bos which have no vm_node allocated */
606 		if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
607 			continue;
608 		if (bo->vmap_addr == vaddr)
609 			goto found;
610 	}
611 	spin_unlock_irqrestore(&bdev->list_lock, flags);
612 	return NULL;
613 found:
614 	spin_unlock_irqrestore(&bdev->list_lock, flags);
615 	return bo;
616 }
617 
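/* Free the first 'nr_pages' order-0 pages in 'page_array'. */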
618 static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array)
619 {
620 	unsigned long i;
621 
622 	for (i = 0; i < nr_pages; i++)
623 		__free_pages(page_array[i], 0);
624 }
625 
626 static void free_private_bo_pages(struct hmm_buffer_object *bo)
627 {
628 	set_pages_array_wb(bo->pages, bo->pgnr);
629 	free_pages_bulk_array(bo->pgnr, bo->pages);
630 }
631 
/* Allocate pages which will be used only by the ISP */
633 static int alloc_private_pages(struct hmm_buffer_object *bo)
634 {
635 	const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
636 	int ret;
637 
638 	ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages);
639 	if (ret != bo->pgnr) {
640 		free_pages_bulk_array(ret, bo->pages);
641 		return -ENOMEM;
642 	}
643 
644 	ret = set_pages_array_uc(bo->pages, bo->pgnr);
645 	if (ret) {
646 		dev_err(atomisp_dev, "set pages uncacheable failed.\n");
647 		free_pages_bulk_array(bo->pgnr, bo->pages);
648 		return ret;
649 	}
650 
651 	return 0;
652 }
653 
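/* Release the references taken by get_user_pages_fast() on the pinned pages. */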
654 static void free_user_pages(struct hmm_buffer_object *bo,
655 			    unsigned int page_nr)
656 {
657 	int i;
658 
659 	for (i = 0; i < page_nr; i++)
660 		put_page(bo->pages[i]);
661 }
662 
663 /*
664  * Convert user space virtual address into pages list
665  */
666 static int alloc_user_pages(struct hmm_buffer_object *bo,
667 			    const void __user *userptr)
668 {
669 	int page_nr;
670 
671 	userptr = untagged_addr(userptr);
672 
673 	/* Handle frame buffer allocated in user space */
674 	mutex_unlock(&bo->mutex);
	/* write = 1: the pages may be written by the caller, not forced */
	page_nr = get_user_pages_fast((unsigned long)userptr, bo->pgnr, 1, bo->pages);
	mutex_lock(&bo->mutex);

679 	if (page_nr != bo->pgnr) {
680 		dev_err(atomisp_dev,
681 			"get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
682 			bo->pgnr, page_nr);
683 		if (page_nr < 0)
684 			page_nr = 0;
685 		goto out_of_mem;
686 	}
687 
688 	return 0;
689 
690 out_of_mem:
691 
692 	free_user_pages(bo, page_nr);
693 
694 	return -ENOMEM;
695 }
696 
697 /*
698  * allocate/free physical pages for the bo.
699  *
700  * type indicate where are the pages from. currently we have 3 types
701  * of memory: HMM_BO_PRIVATE, HMM_BO_USER.
702  *
703  * userptr is only valid when type is HMM_BO_USER, it indicates
704  * the start address from user space task.
705  */
706 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
707 		       enum hmm_bo_type type,
708 		       const void __user *userptr)
709 {
710 	int ret = -EINVAL;
711 
712 	check_bo_null_return(bo, -EINVAL);
713 
714 	mutex_lock(&bo->mutex);
715 	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
716 
717 	bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
718 	if (unlikely(!bo->pages)) {
719 		ret = -ENOMEM;
720 		goto alloc_err;
721 	}
722 
723 	/*
724 	 * TO DO:
725 	 * add HMM_BO_USER type
726 	 */
727 	if (type == HMM_BO_PRIVATE) {
728 		ret = alloc_private_pages(bo);
729 	} else if (type == HMM_BO_USER) {
730 		ret = alloc_user_pages(bo, userptr);
731 	} else {
732 		dev_err(atomisp_dev, "invalid buffer type.\n");
733 		ret = -EINVAL;
734 	}
735 	if (ret)
736 		goto alloc_err;
737 
738 	bo->type = type;
739 
740 	bo->status |= HMM_BO_PAGE_ALLOCED;
741 
742 	mutex_unlock(&bo->mutex);
743 
744 	return 0;
745 
746 alloc_err:
747 	kfree(bo->pages);
748 	mutex_unlock(&bo->mutex);
749 	dev_err(atomisp_dev, "alloc pages err...\n");
750 	return ret;
751 status_err:
752 	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object already has pages allocated.\n");
755 	return -EINVAL;
756 }
757 
758 /*
759  * free physical pages of the bo.
760  */
761 void hmm_bo_free_pages(struct hmm_buffer_object *bo)
762 {
763 	check_bo_null_return_void(bo);
764 
765 	mutex_lock(&bo->mutex);
766 
767 	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
768 
769 	/* clear the flag anyway. */
770 	bo->status &= (~HMM_BO_PAGE_ALLOCED);
771 
772 	if (bo->type == HMM_BO_PRIVATE)
773 		free_private_bo_pages(bo);
774 	else if (bo->type == HMM_BO_USER)
775 		free_user_pages(bo, bo->pgnr);
776 	else
777 		dev_err(atomisp_dev, "invalid buffer type.\n");
778 
779 	kfree(bo->pages);
780 	mutex_unlock(&bo->mutex);
781 
782 	return;
783 
784 status_err2:
785 	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object has no pages allocated yet.\n");
788 }
789 
790 int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
791 {
792 	check_bo_null_return(bo, 0);
793 
794 	return bo->status & HMM_BO_PAGE_ALLOCED;
795 }
796 
797 /*
798  * bind the physical pages to a virtual address space.
799  */
800 int hmm_bo_bind(struct hmm_buffer_object *bo)
801 {
802 	int ret;
803 	unsigned int virt;
804 	struct hmm_bo_device *bdev;
805 	unsigned int i;
806 
807 	check_bo_null_return(bo, -EINVAL);
808 
809 	mutex_lock(&bo->mutex);
810 
811 	check_bo_status_yes_goto(bo,
812 				 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
813 				 status_err1);
814 
815 	check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
816 
817 	bdev = bo->bdev;
818 
819 	virt = bo->start;
820 
821 	for (i = 0; i < bo->pgnr; i++) {
822 		ret =
823 		    isp_mmu_map(&bdev->mmu, virt,
824 				page_to_phys(bo->pages[i]), 1);
825 		if (ret)
826 			goto map_err;
827 		virt += (1 << PAGE_SHIFT);
828 	}
829 
830 	/*
831 	 * flush TBL here.
832 	 *
833 	 * theoretically, we donot need to flush TLB as we didnot change
834 	 * any existed address mappings, but for Silicon Hive's MMU, its
835 	 * really a bug here. I guess when fetching PTEs (page table entity)
836 	 * to TLB, its MMU will fetch additional INVALID PTEs automatically
837 	 * for performance issue. EX, we only set up 1 page address mapping,
838 	 * meaning updating 1 PTE, but the MMU fetches 4 PTE at one time,
839 	 * so the additional 3 PTEs are invalid.
840 	 */
841 	if (bo->start != 0x0)
842 		isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
843 					(bo->pgnr << PAGE_SHIFT));
844 
845 	bo->status |= HMM_BO_BINDED;
846 
847 	mutex_unlock(&bo->mutex);
848 
849 	return 0;
850 
851 map_err:
	/* roll back the mappings that were already set up */
853 	virt = bo->start;
854 	for ( ; i > 0; i--) {
855 		isp_mmu_unmap(&bdev->mmu, virt, 1);
856 		virt += pgnr_to_size(1);
857 	}
858 
859 	mutex_unlock(&bo->mutex);
860 	dev_err(atomisp_dev,
861 		"setup MMU address mapping failed.\n");
862 	return ret;
863 
864 status_err2:
865 	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "buffer object already bound.\n");
867 	return -EINVAL;
868 status_err1:
869 	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object vm_node or pages not allocated.\n");
872 	return -EINVAL;
873 }
874 
875 /*
 * unbind the physical pages from the ISP virtual address space.
877  */
878 void hmm_bo_unbind(struct hmm_buffer_object *bo)
879 {
880 	unsigned int virt;
881 	struct hmm_bo_device *bdev;
882 	unsigned int i;
883 
884 	check_bo_null_return_void(bo);
885 
886 	mutex_lock(&bo->mutex);
887 
888 	check_bo_status_yes_goto(bo,
889 				 HMM_BO_PAGE_ALLOCED |
890 				 HMM_BO_ALLOCED |
891 				 HMM_BO_BINDED, status_err);
892 
893 	bdev = bo->bdev;
894 
895 	virt = bo->start;
896 
897 	for (i = 0; i < bo->pgnr; i++) {
898 		isp_mmu_unmap(&bdev->mmu, virt, 1);
899 		virt += pgnr_to_size(1);
900 	}
901 
902 	/*
903 	 * flush TLB as the address mapping has been removed and
904 	 * related TLBs should be invalidated.
905 	 */
906 	isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
907 				(bo->pgnr << PAGE_SHIFT));
908 
909 	bo->status &= (~HMM_BO_BINDED);
910 
911 	mutex_unlock(&bo->mutex);
912 
913 	return;
914 
915 status_err:
916 	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer vm_node or pages not allocated, or not bound yet.\n");
919 }
920 
921 int hmm_bo_binded(struct hmm_buffer_object *bo)
922 {
923 	int ret;
924 
925 	check_bo_null_return(bo, 0);
926 
927 	mutex_lock(&bo->mutex);
928 
929 	ret = bo->status & HMM_BO_BINDED;
930 
931 	mutex_unlock(&bo->mutex);
932 
933 	return ret;
934 }
935 
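/*
 * Map the bo's pages into a contiguous kernel virtual mapping, either
 * cached or uncached.  If a mapping with the requested caching mode
 * already exists it is returned; a mapping with the other mode is torn
 * down and recreated.
 */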
936 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
937 {
938 	check_bo_null_return(bo, NULL);
939 
940 	mutex_lock(&bo->mutex);
941 	if (((bo->status & HMM_BO_VMAPED) && !cached) ||
942 	    ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
943 		mutex_unlock(&bo->mutex);
944 		return bo->vmap_addr;
945 	}
946 
	/* the caching mode needs to change, so vunmap the old mapping first */
948 	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
949 		vunmap(bo->vmap_addr);
950 		bo->vmap_addr = NULL;
951 		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
952 	}
953 
954 	bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP,
955 			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
956 	if (unlikely(!bo->vmap_addr)) {
957 		mutex_unlock(&bo->mutex);
958 		dev_err(atomisp_dev, "vmap failed...\n");
959 		return NULL;
960 	}
961 	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
962 
963 	mutex_unlock(&bo->mutex);
964 	return bo->vmap_addr;
965 }
966 
967 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
968 {
969 	check_bo_null_return_void(bo);
970 
971 	mutex_lock(&bo->mutex);
972 	if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
973 		mutex_unlock(&bo->mutex);
974 		return;
975 	}
976 
977 	clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
978 	mutex_unlock(&bo->mutex);
979 }
980 
981 void hmm_bo_vunmap(struct hmm_buffer_object *bo)
982 {
983 	check_bo_null_return_void(bo);
984 
985 	mutex_lock(&bo->mutex);
986 	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
987 		vunmap(bo->vmap_addr);
988 		bo->vmap_addr = NULL;
989 		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
990 	}
991 
992 	mutex_unlock(&bo->mutex);
993 	return;
994 }
995 
996 void hmm_bo_ref(struct hmm_buffer_object *bo)
997 {
998 	check_bo_null_return_void(bo);
999 
1000 	kref_get(&bo->kref);
1001 }
1002 
1003 static void kref_hmm_bo_release(struct kref *kref)
1004 {
1005 	if (!kref)
1006 		return;
1007 
1008 	hmm_bo_release(kref_to_hmm_bo(kref));
1009 }
1010 
1011 void hmm_bo_unref(struct hmm_buffer_object *bo)
1012 {
1013 	check_bo_null_return_void(bo);
1014 
1015 	kref_put(&bo->kref, kref_hmm_bo_release);
1016 }
1017 
1018 static void hmm_bo_vm_open(struct vm_area_struct *vma)
1019 {
1020 	struct hmm_buffer_object *bo =
1021 	    (struct hmm_buffer_object *)vma->vm_private_data;
1022 
1023 	check_bo_null_return_void(bo);
1024 
1025 	hmm_bo_ref(bo);
1026 
1027 	mutex_lock(&bo->mutex);
1028 
1029 	bo->status |= HMM_BO_MMAPED;
1030 
1031 	bo->mmap_count++;
1032 
1033 	mutex_unlock(&bo->mutex);
1034 }
1035 
1036 static void hmm_bo_vm_close(struct vm_area_struct *vma)
1037 {
1038 	struct hmm_buffer_object *bo =
1039 	    (struct hmm_buffer_object *)vma->vm_private_data;
1040 
1041 	check_bo_null_return_void(bo);
1042 
1043 	hmm_bo_unref(bo);
1044 
1045 	mutex_lock(&bo->mutex);
1046 
1047 	bo->mmap_count--;
1048 
1049 	if (!bo->mmap_count) {
1050 		bo->status &= (~HMM_BO_MMAPED);
1051 		vma->vm_private_data = NULL;
1052 	}
1053 
1054 	mutex_unlock(&bo->mutex);
1055 }
1056 
1057 static const struct vm_operations_struct hmm_bo_vm_ops = {
1058 	.open = hmm_bo_vm_open,
1059 	.close = hmm_bo_vm_close,
1060 };
1061 
1062 /*
1063  * mmap the bo to user space.
1064  */
1065 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1066 {
1067 	unsigned int start, end;
1068 	unsigned int virt;
1069 	unsigned int pgnr, i;
1070 	unsigned int pfn;
1071 
1072 	check_bo_null_return(bo, -EINVAL);
1073 
1074 	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1075 
1076 	pgnr = bo->pgnr;
1077 	start = vma->vm_start;
1078 	end = vma->vm_end;
1079 
1080 	/*
1081 	 * check vma's virtual address space size and buffer object's size.
1082 	 * must be the same.
1083 	 */
1084 	if ((start + pgnr_to_size(pgnr)) != end) {
1085 		dev_warn(atomisp_dev,
1086 			 "vma's address space size not equal to buffer object's size");
1087 		return -EINVAL;
1088 	}
1089 
1090 	virt = vma->vm_start;
1091 	for (i = 0; i < pgnr; i++) {
1092 		pfn = page_to_pfn(bo->pages[i]);
1093 		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
1094 			dev_warn(atomisp_dev,
1095 				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
1096 				 virt, pfn, 1);
1097 			return -EINVAL;
1098 		}
1099 		virt += PAGE_SIZE;
1100 	}
1101 
1102 	vma->vm_private_data = bo;
1103 
1104 	vma->vm_ops = &hmm_bo_vm_ops;
1105 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1106 
1107 	/*
1108 	 * call hmm_bo_vm_open explicitly.
1109 	 */
1110 	hmm_bo_vm_open(vma);
1111 
1112 	return 0;
1113 
1114 status_err:
1115 	dev_err(atomisp_dev, "buffer page not allocated yet.\n");
1116 	return -EINVAL;
1117 }
1118