// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
/*
 * This file contains functions for reserved memory pool management.
 */
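/*
 * The reserved pool pre-allocates a fixed number of pages at init time,
 * marks them uncached, and manages them as a simple stack
 * (pages[0..index-1]): allocations pop pages off the top and frees push
 * them back, so no page allocator calls are needed afterwards.  All stack
 * accesses are serialized by list_lock.
 */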
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>

#include <asm/set_memory.h>

#include "atomisp_internal.h"
#include "hmm/hmm_pool.h"

/*
 * Reserved memory pool ops.
 */
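/*
 * get_pages_from_reserved_pool() - pop up to @size pages off the pool stack.
 *
 * Fills @page_obj[] with pages taken from the top of the stack and returns
 * the number of pages actually obtained, which may be less than @size (or 0)
 * if the pool is exhausted or not initialized.  @cached is ignored: reserved
 * pages are always switched to uncached at pool init time.
 */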
static unsigned int get_pages_from_reserved_pool(void *pool,
	struct hmm_page_object *page_obj,
	unsigned int size, bool cached)
{
	unsigned long flags;
	unsigned int i = 0;
	unsigned int repool_pgnr;
	int j;
	struct hmm_reserved_pool_info *repool_info = pool;

	if (!repool_info)
		return 0;

	spin_lock_irqsave(&repool_info->list_lock, flags);
	if (repool_info->initialized) {
		repool_pgnr = repool_info->index;

		/* Pop pages off the top of the stack until @size are taken. */
		for (j = repool_pgnr - 1; j >= 0; j--) {
			page_obj[i].page = repool_info->pages[j];
			page_obj[i].type = HMM_PAGE_TYPE_RESERVED;
			i++;
			repool_info->index--;
			if (i == size)
				break;
		}
	}
	spin_unlock_irqrestore(&repool_info->list_lock, flags);
	return i;
}

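/*
 * free_pages_to_reserved_pool() - push one page back onto the pool stack.
 *
 * A page is only returned to the pool if it is of type
 * HMM_PAGE_TYPE_RESERVED, the pool is initialized, and there is room left
 * (index < pgnr); otherwise it is silently ignored.
 */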
static void free_pages_to_reserved_pool(void *pool,
					struct hmm_page_object *page_obj)
{
	unsigned long flags;
	struct hmm_reserved_pool_info *repool_info = pool;

	if (!repool_info)
		return;

	spin_lock_irqsave(&repool_info->list_lock, flags);

	if (repool_info->initialized &&
	    repool_info->index < repool_info->pgnr &&
	    page_obj->type == HMM_PAGE_TYPE_RESERVED) {
		repool_info->pages[repool_info->index++] = page_obj->page;
	}

	spin_unlock_irqrestore(&repool_info->list_lock, flags);
}

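/*
 * hmm_reserved_pool_setup() - allocate and initialize the pool bookkeeping.
 *
 * Allocates the hmm_reserved_pool_info structure and its page pointer array
 * sized for @pool_size pages.  The pages themselves are allocated later by
 * hmm_reserved_pool_init().
 */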
static int hmm_reserved_pool_setup(struct hmm_reserved_pool_info **repool_info,
				   unsigned int pool_size)
{
	struct hmm_reserved_pool_info *pool_info;

	pool_info = kmalloc(sizeof(*pool_info), GFP_KERNEL);
	if (unlikely(!pool_info))
		return -ENOMEM;

	pool_info->pages = kmalloc_array(pool_size, sizeof(struct page *),
					 GFP_KERNEL);
	if (unlikely(!pool_info->pages)) {
		kfree(pool_info);
		return -ENOMEM;
	}

	pool_info->index = 0;
	pool_info->pgnr = 0;
	spin_lock_init(&pool_info->list_lock);
	pool_info->initialized = true;

	*repool_info = pool_info;

	return 0;
}

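/*
 * hmm_reserved_pool_init() - fill the pool with @pool_size uncached pages.
 *
 * Pages are grabbed in the largest power-of-two blocks the page allocator
 * will give us, starting at MAX_ORDER and falling back to smaller orders on
 * failure.  Each block is switched to uncached before being added to the
 * pool.  If even order-0 allocations fail ALLOC_PAGE_FAIL_NUM times in a
 * row, we give up and run with however many pages we managed to get.
 */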
static int hmm_reserved_pool_init(void **pool, unsigned int pool_size)
{
	int ret;
	unsigned int blk_pgnr;
	unsigned int pgnr = pool_size;
	unsigned int order = 0;
	unsigned int i = 0;
	int fail_number = 0;
	struct page *pages;
	int j;
	struct hmm_reserved_pool_info *repool_info;

	if (pool_size == 0)
		return 0;

	ret = hmm_reserved_pool_setup(&repool_info, pool_size);
	if (ret) {
		dev_err(atomisp_dev, "hmm_reserved_pool_setup failed.\n");
		return ret;
	}

	order = MAX_ORDER;

	while (pgnr) {
		blk_pgnr = 1U << order;
		while (blk_pgnr > pgnr) {
			order--;
			blk_pgnr >>= 1U;
		}
		BUG_ON(order > MAX_ORDER);

		pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
		if (unlikely(!pages)) {
			if (order == 0) {
				fail_number++;
				dev_err(atomisp_dev, "%s: alloc_pages failed: %d\n",
					__func__, fail_number);
				/*
				 * Give up once even order-0 allocations have
				 * failed ALLOC_PAGE_FAIL_NUM times in a row.
				 *
				 * FIXME: is this retry/give-up mechanism good
				 * enough?
				 */
				if (fail_number == ALLOC_PAGE_FAIL_NUM)
					goto end;
			} else {
				/* Retry with a smaller block. */
				order--;
			}
		} else {
			blk_pgnr = 1U << order;

			ret = set_pages_uc(pages, blk_pgnr);
			if (ret) {
				dev_err(atomisp_dev,
					"set pages uncached failed\n");
				__free_pages(pages, order);
				goto end;
			}

			for (j = 0; j < blk_pgnr; j++)
				repool_info->pages[i++] = pages + j;

			repool_info->index += blk_pgnr;
			repool_info->pgnr += blk_pgnr;

			pgnr -= blk_pgnr;

			fail_number = 0;
		}
	}

end:
	repool_info->initialized = true;

	*pool = repool_info;

	dev_info(atomisp_dev,
		 "hmm_reserved_pool initialized successfully with %d pages\n",
		 repool_info->pgnr);
	return 0;
}

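/*
 * hmm_reserved_pool_exit() - tear down the pool and release all pages.
 *
 * Marks the pool uninitialized under the lock so concurrent users stop
 * touching it, then switches every page back to write-back caching and
 * frees it (see the workaround note below), and finally frees the
 * bookkeeping structures.
 */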
static void hmm_reserved_pool_exit(void **pool)
{
	unsigned long flags;
	int i, ret;
	unsigned int pgnr;
	struct hmm_reserved_pool_info *repool_info = *pool;

	if (!repool_info)
		return;

	spin_lock_irqsave(&repool_info->list_lock, flags);
	if (!repool_info->initialized) {
		spin_unlock_irqrestore(&repool_info->list_lock, flags);
		return;
	}
	pgnr = repool_info->pgnr;
	repool_info->index = 0;
	repool_info->pgnr = 0;
	repool_info->initialized = false;
	spin_unlock_irqrestore(&repool_info->list_lock, flags);

	for (i = 0; i < pgnr; i++) {
		ret = set_pages_wb(repool_info->pages[i], 1);
		if (ret)
			dev_err(atomisp_dev,
				"set page to WB err...ret=%d\n", ret);
		/*
		 * W/A: set_pages_wb() occasionally returns -EFAULT,
		 * indicating that the page address is not in the valid
		 * range (0xffff880000000000-0xffffc7ffffffffff); calling
		 * __free_pages() on such a page would panic.  It is not
		 * known why the page address becomes invalid; it may be
		 * memory corruption caused by low memory.  So only free
		 * the page when the attribute change succeeded.
		 */
		if (!ret)
			__free_pages(repool_info->pages[i], 0);
	}

	kfree(repool_info->pages);
	kfree(repool_info);

	*pool = NULL;
}

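/*
 * hmm_reserved_pool_inited() - report whether the pool is ready for use.
 */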
static int hmm_reserved_pool_inited(void *pool)
{
	struct hmm_reserved_pool_info *repool_info = pool;

	if (!repool_info)
		return 0;

	return repool_info->initialized;
}

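/* Reserved-pool implementation of the generic hmm_pool_ops interface. */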
struct hmm_pool_ops reserved_pops = {
	.pool_init		= hmm_reserved_pool_init,
	.pool_exit		= hmm_reserved_pool_exit,
	.pool_alloc_pages	= get_pages_from_reserved_pool,
	.pool_free_pages	= free_pages_to_reserved_pool,
	.pool_inited		= hmm_reserved_pool_inited,
};