/* minix/kernel/arch/earm/pg_utils.c (revision fb4fbf7a) */
#include <minix/cpufeature.h>

#include <minix/type.h>
#include <assert.h>
#include "kernel/kernel.h"
#include "arch_proto.h"
#include <machine/cpu.h>
#include <arm/armreg.h>

#include <string.h>

/* These are set/computed in kernel.lds. */
extern char _kern_vir_base, _kern_phys_base, _kern_size;

/* Convert the linker-provided symbol addresses into values we can use. */
static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;

/* page directory we can use to map things */
static u32_t pagedir[4096] __aligned(16384);

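/* Print the boot memory map: one base-end range per entry, followed by
 * the number of map entries.
 */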
void print_memmap(kinfo_t *cbi)
{
	int m;
	assert(cbi->mmap_size < MAXMEMMAP);
	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes addr = cbi->memmap[m].mm_base_addr,
		    endit = cbi->memmap[m].mm_base_addr + cbi->memmap[m].mm_length;
		printf("%08lx-%08lx ", addr, endit);
	}
	printf("\nsize %08lx\n", cbi->mmap_size);
}

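/* Remove the range [start, end) from the free memory map. Entries that
 * overlap the range are cleared and any remaining head/tail pieces are
 * re-added as separate entries.
 */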
void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
{
	int m;
	phys_bytes o;

	if((o=start % ARM_PAGE_SIZE))
		start -= o;
	if((o=end % ARM_PAGE_SIZE))
		end += ARM_PAGE_SIZE - o;

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		phys_bytes substart = start, subend = end;
		phys_bytes memaddr = cbi->memmap[m].mm_base_addr,
		    memend = cbi->memmap[m].mm_base_addr + cbi->memmap[m].mm_length;

		/* adjust cut range to be a subset of the free memory */
		if(substart < memaddr) substart = memaddr;
		if(subend > memend) subend = memend;
		if(substart >= subend) continue;

		/* if there is any overlap, forget this one and add
		 * 1-2 subranges back
		 */
		cbi->memmap[m].mm_base_addr = cbi->memmap[m].mm_length = 0;
		if(substart > memaddr)
			add_memmap(cbi, memaddr, substart-memaddr);
		if(subend < memend)
			add_memmap(cbi, subend, memend-subend);
	}
}

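/* Add a physical memory range to the memory map as available memory.
 * The range is truncated at the 4GB limit, rounded to page boundaries
 * and stored in the first empty map slot; mem_high_phys is raised if
 * the new range ends above it.
 */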
void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
{
	int m;
#define LIMIT 0xFFFFF000
	/* Truncate available memory at 4GB as the rest of minix
	 * currently can't deal with anything bigger.
	 */
	if(addr > LIMIT) {
		return;
	}

	if(addr + len > LIMIT) {
		len -= (addr + len - LIMIT);
	}

	assert(cbi->mmap_size < MAXMEMMAP);
	if(len == 0) {
		return;
	}
	addr = roundup(addr, ARM_PAGE_SIZE);
	len = rounddown(len, ARM_PAGE_SIZE);

	assert(kernel_may_alloc);

	for(m = 0; m < MAXMEMMAP; m++) {
		phys_bytes highmark;
		if(cbi->memmap[m].mm_length) {
			continue;
		}
		cbi->memmap[m].mm_base_addr = addr;
		cbi->memmap[m].mm_length = len;
		cbi->memmap[m].type = MULTIBOOT_MEMORY_AVAILABLE;
		if(m >= cbi->mmap_size) {
			cbi->mmap_size = m+1;
		}
		highmark = addr + len;
		if(highmark > cbi->mem_high_phys) {
			cbi->mem_high_phys = highmark;
		}
		return;
	}

	panic("no available memmap slot");
}

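/* Hand out one entry from a small, statically allocated pool of 1KB
 * second-level page tables. Returns its virtual address and stores its
 * physical address in *ph.
 */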
u32_t *alloc_pagetable(phys_bytes *ph)
{
	u32_t *ret;
#define PG_PAGETABLES 24
	static u32_t pagetables[PG_PAGETABLES][256] __aligned(1024);
	static int pt_inuse = 0;
	if(pt_inuse >= PG_PAGETABLES) {
		panic("no more pagetables");
	}
	assert(sizeof(pagetables[pt_inuse]) == 1024);
	ret = pagetables[pt_inuse++];
	*ph = vir2phys(ret);
	return ret;
}

#define PAGE_KB (ARM_PAGE_SIZE / 1024)

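/* Allocate one physical page by taking it off the start of the first
 * non-empty memory map entry. Panics when no free memory is left.
 */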
phys_bytes pg_alloc_page(kinfo_t *cbi)
{
	int m;
	multiboot_memory_map_t *mmap;

	assert(kernel_may_alloc);

	for(m = 0; m < cbi->mmap_size; m++) {
		mmap = &cbi->memmap[m];
		if(!mmap->mm_length) {
			continue;
		}
		assert(mmap->mm_length > 0);
		assert(!(mmap->mm_length % ARM_PAGE_SIZE));
		assert(!(mmap->mm_base_addr % ARM_PAGE_SIZE));

		u32_t addr = mmap->mm_base_addr;
		mmap->mm_base_addr += ARM_PAGE_SIZE;
		mmap->mm_length -= ARM_PAGE_SIZE;

		cbi->kernel_allocated_bytes_dynamic += ARM_PAGE_SIZE;

		return addr;
	}

	panic("can't find free memory");
}

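/* Build an identity mapping of the full 4GB address space in the boot
 * page directory using 1MB sections: normal RAM is mapped cacheable,
 * everything else as device memory.
 */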
void pg_identity(kinfo_t *cbi)
{
	uint32_t i;
	phys_bytes phys;

	/* We map memory that does not correspond to physical memory
	 * as non-cacheable. Make sure we know what it is.
	 */
	assert(cbi->mem_high_phys);

	/* Set up an identity mapping page directory */
	for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
		u32_t flags = ARM_VM_SECTION
			| ARM_VM_SECTION_USER
			| ARM_VM_SECTION_DOMAIN;

		phys = i * ARM_SECTION_SIZE;
		/* mark normal memory as cacheable. TODO: fix hard-coded values */
		if (phys >= PHYS_MEM_BEGIN && phys <= PHYS_MEM_END) {
			pagedir[i] = phys | flags | ARM_VM_SECTION_CACHED;
		} else {
			pagedir[i] = phys | flags | ARM_VM_SECTION_DEVICE;
		}
	}
}

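/* Map the kernel image at its virtual base with cacheable, supervisor-only
 * 1MB sections. Returns the first page directory entry left free after
 * the kernel.
 */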
int pg_mapkernel(void)
{
	int pde;
	u32_t mapped = 0, kern_phys = kern_phys_start;

	assert(!(kern_vir_start % ARM_SECTION_SIZE));
	assert(!(kern_phys_start % ARM_SECTION_SIZE));
	pde = kern_vir_start / ARM_SECTION_SIZE; /* start pde */
	while(mapped < kern_kernlen) {
		pagedir[pde] = (kern_phys & ARM_VM_SECTION_MASK)
			| ARM_VM_SECTION
			| ARM_VM_SECTION_SUPER
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_CACHED;
		mapped += ARM_SECTION_SIZE;
		kern_phys += ARM_SECTION_SIZE;
		pde++;
	}
	return pde;	/* free pde */
}

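/* Configure the MMU control registers and enable paging: set up TTBCR
 * and DACR, then turn on the MMU, caches, branch prediction and
 * barriers through SCTLR.
 */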
void vm_enable_paging(void)
{
	u32_t sctlr;
	u32_t actlr;

	write_ttbcr(0);

	/* Set all Domains to Client */
	write_dacr(0x55555555);

	sctlr = read_sctlr();

	/* Enable MMU */
	sctlr |= CPU_CONTROL_MMU_ENABLE;

	/* TRE set to zero (default reset value): TEX[2:0] are used, plus C and B bits. */
	sctlr &= ~CPU_CONTROL_TR_ENABLE;

	/* AFE set to zero (default reset value): not using the simplified access permissions model. */
	sctlr &= ~CPU_CONTROL_AF_ENABLE;

	/* Enable instruction cache, data cache and branch prediction */
	sctlr |= CPU_CONTROL_DC_ENABLE;
	sctlr |= CPU_CONTROL_IC_ENABLE;
	sctlr |= CPU_CONTROL_BPRD_ENABLE;

	/* Enable barriers */
	sctlr |= CPU_CONTROL_32BD_ENABLE;

	/* Enable L2 cache (Cortex-A8) */
	#define CORTEX_A8_L2EN   (0x02)
	actlr = read_actlr();
	actlr |= CORTEX_A8_L2EN;
	write_actlr(actlr);

	write_sctlr(sctlr);
}

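/* Load the physical address of the boot page directory into TTBR0 and
 * return it.
 */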
phys_bytes pg_load(void)
{
	phys_bytes phpagedir = vir2phys(pagedir);
	write_ttbr0(phpagedir);
	return phpagedir;
}

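/* Clear the boot page directory. */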
void pg_clear(void)
{
	memset(pagedir, 0, sizeof(pagedir));
}

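/* Round a physical address down to a page boundary. */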
phys_bytes pg_rounddown(phys_bytes b)
{
	phys_bytes o;
	if(!(o = b % ARM_PAGE_SIZE)) {
		return b;
	}
	return b - o;
}

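/* Map the virtual range [vaddr, vaddr_end) with 4KB pages, allocating
 * second-level page tables as needed. With phys == PG_ALLOCATEME each
 * page is backed by a freshly allocated physical page; otherwise the
 * range is mapped to consecutive physical pages starting at phys.
 */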
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
	kinfo_t *cbi)
{
	static int mapped_pde = -1;
	static u32_t *pt = NULL;
	int pde, pte;

	assert(kernel_may_alloc);

	if(phys == PG_ALLOCATEME) {
		assert(!(vaddr % ARM_PAGE_SIZE));
	} else {
		assert((vaddr % ARM_PAGE_SIZE) == (phys % ARM_PAGE_SIZE));
		vaddr = pg_rounddown(vaddr);
		phys = pg_rounddown(phys);
	}
	assert(vaddr < kern_vir_start);

	while(vaddr < vaddr_end) {
		phys_bytes source = phys;
		assert(!(vaddr % ARM_PAGE_SIZE));
		if(phys == PG_ALLOCATEME) {
			source = pg_alloc_page(cbi);
		} else {
			assert(!(phys % ARM_PAGE_SIZE));
		}
		assert(!(source % ARM_PAGE_SIZE));
		pde = ARM_VM_PDE(vaddr);
		pte = ARM_VM_PTE(vaddr);
		if(mapped_pde < pde) {
			phys_bytes ph;
			pt = alloc_pagetable(&ph);
			pagedir[pde] = (ph & ARM_VM_PDE_MASK)
					| ARM_VM_PAGEDIR
					| ARM_VM_PDE_DOMAIN;
			mapped_pde = pde;
		}
		assert(pt);
		pt[pte] = (source & ARM_VM_PTE_MASK)
			| ARM_VM_PAGETABLE
			| ARM_VM_PTE_CACHED
			| ARM_VM_PTE_USER;
		vaddr += ARM_PAGE_SIZE;
		if(phys != PG_ALLOCATEME) {
			phys += ARM_PAGE_SIZE;
		}
	}
}

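/* Report the physical and virtual addresses of the boot page directory. */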
void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
{
	*pagedir_ph = vir2phys(pagedir);
	*pagedir_v = pagedir;
}