1 /*
2  *	<ofmem_sparc64.c>
3  *
4  *	OF Memory manager
5  *
6  *   Copyright (C) 1999-2004 Samuel Rydh (samuel@ibrium.se)
7  *   Copyright (C) 2004 Stefan Reinauer
8  *
9  *   This program is free software; you can redistribute it and/or
10  *   modify it under the terms of the GNU General Public License
11  *   as published by the Free Software Foundation
12  *
13  */
14 
15 #include "config.h"
16 #include "libopenbios/bindings.h"
17 #include "libc/string.h"
18 #include "arch/sparc64/ofmem_sparc64.h"
19 #include "spitfire.h"
20 
/* The ofmem bookkeeping heap starts right after the ofmem_t header,
   rounded up to 8-byte alignment, inside the s_ofmem_data arena below. */
#define OF_MALLOC_BASE		((char*)OFMEM + ALIGN_SIZE(sizeof(ofmem_t), 8))

/* Static 192 KiB arena holding both the ofmem_t state and its heap. */
#define MEMSIZE (192 * 1024)
static union {
	char memory[MEMSIZE];
	ofmem_t ofmem;
} s_ofmem_data;

#define OFMEM      	(&s_ofmem_data.ofmem)
#define TOP_OF_RAM 	(s_ofmem_data.memory + MEMSIZE)

/* Bookkeeping of physical ranges preserved across reset (see ofmem_init). */
static retain_t s_retained;
/* Head of the active translation list, shared with generic ofmem code. */
translation_t **g_ofmem_translations = &s_ofmem_data.ofmem.trans;

/* NOTE(review): declared and zero-initialized here but never written in
   this file -- presumably set up by the TLB miss handling code; confirm. */
ucell *va2ttedata = 0;
/* RAM size detected at startup; defined in arch boot code. */
extern uint64_t qemu_mem_size;
37 
/* Round x up to the next multiple of a (a must be a power of two). */
static inline size_t ALIGN_SIZE(size_t x, size_t a)
{
    size_t mask = a - 1;

    return (x + mask) & ~mask;
}
42 
/* Top of the ofmem heap: the end of the static s_ofmem_data arena. */
static ucell get_heap_top( void )
{
	return (ucell)TOP_OF_RAM;
}
47 
/* Return the per-arch ofmem instance used by the generic ofmem code. */
ofmem_t* ofmem_arch_get_private(void)
{
	return OFMEM;
}
52 
/* Base of the ofmem malloc heap (just past the ofmem_t header). */
void* ofmem_arch_get_malloc_base(void)
{
	return OF_MALLOC_BASE;
}
57 
/* Upper bound of the malloc heap: end of the static arena. */
ucell ofmem_arch_get_heap_top(void)
{
	return get_heap_top();
}
62 
/* Highest virtual address managed by ofmem (arch-defined constant). */
ucell ofmem_arch_get_virt_top(void)
{
	return (ucell)OFMEM_VIRT_TOP;
}
67 
/* Start of the virtual range reserved for I/O mappings; _iomem is a
   symbol provided outside this file (presumably by the linker script). */
ucell ofmem_arch_get_iomem_base(void)
{
	return (ucell)&_iomem;
}
72 
/* End of the I/O mapping range: a fixed 32 KiB (0x8000) window
   above _iomem. */
ucell ofmem_arch_get_iomem_top(void)
{
	return (ucell)&_iomem + 0x8000;
}
77 
/* Return the area recording physical ranges preserved across reset. */
retain_t *ofmem_arch_get_retained(void)
{
	return (&s_retained);
}
82 
int ofmem_arch_get_translation_entry_size(void)
{
	/* Return size of a single MMU package translation property entry in
	   cells: virt, size, mode (see ofmem_arch_create_translation_entry). */
	return 3;
}
88 
/* Emit one MMU "translation" property entry (3 cells) for t.
 *
 * There is no formal documentation for this layout, but both the Linux
 * kernel and OpenSolaris expect each entry to be:
 *
 *	virtual address
 *	length
 *	mode (valid TTE for start of translation region)
 */
void ofmem_arch_create_translation_entry(ucell *transentry, translation_t *t)
{
	ucell tte = t->phys | t->mode | SPITFIRE_TTE_VALID;

	transentry[0] = t->virt;
	transentry[1] = t->size;
	transentry[2] = tte;
}
104 
105 /* Return the size of a memory available entry given the phandle in cells */
int ofmem_arch_get_available_entry_size(phandle_t ph)
{
	/* One cell for the size, preceded by the address cells: the memory
	   node uses the physical-address cell count, everything else one. */
	int addr_cells = (ph == s_phandle_memory)
			? ofmem_arch_get_physaddr_cellsize()
			: 1;

	return addr_cells + 1;
}
114 
115 /* Generate memory available property entry for Sparc64 */
void ofmem_arch_create_available_entry(phandle_t ph, ucell *availentry, phys_addr_t start, ucell size)
{
	int pos;

	/* Encode the start address: physical encoding for the memory node,
	   a single cell for everything else. */
	if (ph == s_phandle_memory) {
		pos = ofmem_arch_encode_physaddr(availentry, start);
	} else {
		availentry[0] = start;
		pos = 1;
	}

	/* The size always occupies the final cell. */
	availentry[pos] = size;
}
128 
129 /* Unmap a set of pages */
void ofmem_arch_unmap_pages(ucell virt, ucell size)
{
    ucell page, end;

    /* Round the range out to whole 8k pages. */
    page = virt & ~PAGE_MASK_8K;
    end = page + ((size + PAGE_MASK_8K) & ~PAGE_MASK_8K);

    /* Flush every page of the range from both TLBs. */
    while (page < end) {
        itlb_demap(page);
        dtlb_demap(page);
        page += PAGE_SIZE_8K;
    }
}
145 
146 /* Map a set of pages */
void ofmem_arch_map_pages(phys_addr_t phys, ucell virt, ucell size, ucell mode)
{
    unsigned long tte_data, currsize;

    /* Install locked tlb entries now; non-locked mappings are only
       recorded by the generic ofmem code and presumably faulted in on
       demand via find_tte() -- no TLB entry is installed here for them. */
    if (mode & SPITFIRE_TTE_LOCKED) {

        /* aligned to 8k page */
        size = (size + PAGE_MASK_8K) & ~PAGE_MASK_8K;

        /* Greedily cover the range with the largest page size whose
           alignment both virt and phys satisfy.  The constants set the
           TTE page-size field at bits 62:61 (6<<60 = 4M, 4<<60 = 512k,
           2<<60 = 64k, 0 = 8k) -- see the Spitfire TTE data layout. */
        while (size > 0) {
            currsize = size;
            if (currsize >= PAGE_SIZE_4M &&
                (virt & PAGE_MASK_4M) == 0 &&
                (phys & PAGE_MASK_4M) == 0) {
                currsize = PAGE_SIZE_4M;
                tte_data = 6ULL << 60;
            } else if (currsize >= PAGE_SIZE_512K &&
                   (virt & PAGE_MASK_512K) == 0 &&
                   (phys & PAGE_MASK_512K) == 0) {
                currsize = PAGE_SIZE_512K;
                tte_data = 4ULL << 60;
            } else if (currsize >= PAGE_SIZE_64K &&
                   (virt & PAGE_MASK_64K) == 0 &&
                   (phys & PAGE_MASK_64K) == 0) {
                currsize = PAGE_SIZE_64K;
                tte_data = 2ULL << 60;
            } else {
                currsize = PAGE_SIZE_8K;
                tte_data = 0;
            }

            /* Mix in valid bit, physical page and requested mode bits. */
            tte_data |= phys | mode | SPITFIRE_TTE_VALID;

            /* Install the entry into both instruction and data TLBs. */
            itlb_load2(virt, tte_data);
            dtlb_load2(virt, tte_data);

            size -= currsize;
            phys += currsize;
            virt += currsize;
        }
    }
}
190 
191 /************************************************************************/
192 /* misc                                                                 */
193 /************************************************************************/
194 
/* A physical address fits in a single cell on sparc64. */
int ofmem_arch_get_physaddr_cellsize(void)
{
    return 1;
}
199 
/* Store value as one cell at p; returns the number of cells written. */
int ofmem_arch_encode_physaddr(ucell *p, phys_addr_t value)
{
    p[0] = value;
    return 1;
}
205 
/* Default TTE mode bits for normal RAM mappings (phys is unused). */
ucell ofmem_arch_default_translation_mode( phys_addr_t phys )
{
	/* Writable, cacheable */
	/* Privileged and not locked */
	return SPITFIRE_TTE_CP | SPITFIRE_TTE_CV | SPITFIRE_TTE_WRITABLE | SPITFIRE_TTE_PRIVILEGED;
}
212 
/* TTE mode bits for I/O mappings (phys is unused).  Unlike the RAM
   default above this omits SPITFIRE_TTE_CP and adds SPITFIRE_TTE_EFFECT
   -- presumably marking the page as having side effects / uncacheable
   in the physical cache; confirm against the spitfire.h definitions. */
ucell ofmem_arch_io_translation_mode( phys_addr_t phys )
{
	/* Writable, privileged and not locked */
	return SPITFIRE_TTE_CV | SPITFIRE_TTE_WRITABLE | SPITFIRE_TTE_PRIVILEGED | SPITFIRE_TTE_EFFECT;
}
218 
219 /* Architecture-specific OFMEM helpers */
220 unsigned long
find_tte(unsigned long va)221 find_tte(unsigned long va)
222 {
223 	translation_t *t = *g_ofmem_translations;
224 	unsigned long tte_data;
225 
226 	/* Search the ofmem linked list for this virtual address */
227 	while (t != NULL) {
228 		/* Find the correct range */
229 		if (va >= t->virt && va < (t->virt + t->size)) {
230 
231 			/* valid tte, 8k size */
232 			tte_data = SPITFIRE_TTE_VALID;
233 
234 			/* mix in phys address mode */
235 			tte_data |= t->mode;
236 
237 			/* mix in page physical address = t->phys + offset */
238 			tte_data |= t->phys + (va - t->virt);
239 
240 			/* return tte_data */
241 			return tte_data;
242 		}
243 		t = t->next;
244 	}
245 
246 	/* Couldn't find tte */
247 	return -1;
248 }
249 
250 /* ITLB handlers */
/* Insert a TTE into the ITLB at a hardware-chosen slot: write the VA to
   the IMMU TLB tag-access register (offset 48 = 0x30 in ASI_IMMU), then
   push the TTE data in through ASI_ITLB_DATA_IN (address ignored, %g0). */
void
itlb_load2(unsigned long vaddr, unsigned long tte_data)
{
    asm("stxa %0, [%1] %2\n"
        "stxa %3, [%%g0] %4\n"
        : : "r" (vaddr), "r" (48), "i" (ASI_IMMU),
          "r" (tte_data), "i" (ASI_ITLB_DATA_IN));
}
259 
/* Insert a TTE into a specific ITLB entry: write the VA to the IMMU
   tag-access register (0x30), then the TTE data via ASI_ITLB_DATA_ACCESS
   at byte offset tte_index * 8 (one 8-byte word per TLB entry). */
void
itlb_load3(unsigned long vaddr, unsigned long tte_data,
           unsigned long tte_index)
{
    asm("stxa %0, [%1] %2\n"
        "stxa %3, [%4] %5\n"
        : : "r" (vaddr), "r" (48), "i" (ASI_IMMU),
          "r" (tte_data), "r" (tte_index << 3), "i" (ASI_ITLB_DATA_ACCESS));
}
269 
/* Read the faulting virtual address of the last ITLB miss from the
   IMMU tag-access register (offset 48 = 0x30 in ASI_IMMU). */
unsigned long
itlb_faultva(void)
{
    unsigned long faultva;

    asm("ldxa [%1] %2, %0\n"
        : "=r" (faultva)
        : "r" (48), "i" (ASI_IMMU));

    return faultva;
}
281 
/* Demap the ITLB entry for vaddr via ASI_IMMU_DEMAP.  The store address
   encodes the demap operation (the store data is ignored by hardware).
   NOTE(review): reuses vaddr as store data and assumes its low bits are
   zero so the type/context fields select "demap page" -- confirm. */
void
itlb_demap(unsigned long vaddr)
{
    asm("stxa %0, [%0] %1\n"
        : : "r" (vaddr), "i" (ASI_IMMU_DEMAP));
}
288 
289 /* DTLB handlers */
/* Insert a TTE into the DTLB at a hardware-chosen slot: write the VA to
   the DMMU TLB tag-access register (offset 48 = 0x30 in ASI_DMMU), then
   push the TTE data in through ASI_DTLB_DATA_IN (address ignored, %g0). */
void
dtlb_load2(unsigned long vaddr, unsigned long tte_data)
{
    asm("stxa %0, [%1] %2\n"
        "stxa %3, [%%g0] %4\n"
        : : "r" (vaddr), "r" (48), "i" (ASI_DMMU),
          "r" (tte_data), "i" (ASI_DTLB_DATA_IN));
}
298 
/* Insert a TTE into a specific DTLB entry: write the VA to the DMMU
   tag-access register (0x30), then the TTE data via ASI_DTLB_DATA_ACCESS
   at byte offset tte_index * 8 (one 8-byte word per TLB entry). */
void
dtlb_load3(unsigned long vaddr, unsigned long tte_data,
           unsigned long tte_index)
{
    asm("stxa %0, [%1] %2\n"
        "stxa %3, [%4] %5\n"
        : : "r" (vaddr), "r" (48), "i" (ASI_DMMU),
          "r" (tte_data), "r" (tte_index << 3), "i" (ASI_DTLB_DATA_ACCESS));
}
308 
/* Read the faulting virtual address of the last DTLB miss from the
   DMMU tag-access register (offset 48 = 0x30 in ASI_DMMU). */
unsigned long
dtlb_faultva(void)
{
    unsigned long faultva;

    asm("ldxa [%1] %2, %0\n"
        : "=r" (faultva)
        : "r" (48), "i" (ASI_DMMU));

    return faultva;
}
320 
/* Demap the DTLB entry for vaddr via ASI_DMMU_DEMAP.  The store address
   encodes the demap operation (the store data is ignored by hardware).
   NOTE(review): reuses vaddr as store data and assumes its low bits are
   zero so the type/context fields select "demap page" -- confirm. */
void
dtlb_demap(unsigned long vaddr)
{
    asm("stxa %0, [%0] %1\n"
        : : "r" (vaddr), "i" (ASI_DMMU_DEMAP));
}
327 
328 /************************************************************************/
329 /* init / cleanup                                                       */
330 /************************************************************************/
331 
/* Callback for ofmem_walk_boot_map(): re-register one boot-time mapping
   with ofmem by claiming its physical and virtual ranges and recording
   the translation.  Non-locked mappings are then flushed from the TLBs,
   so they are re-established through ofmem on next access.
   Always returns 0 (continue the walk). */
static int remap_page_range( phys_addr_t phys, ucell virt, ucell size, ucell mode )
{
	ofmem_claim_phys(phys, size, 0);
	ofmem_claim_virt(virt, size, 0);
	ofmem_map_page_range(phys, virt, size, mode);
	if (!(mode & SPITFIRE_TTE_LOCKED)) {
		OFMEM_TRACE("remap_page_range clearing translation " FMT_ucellx
				" -> " FMT_ucellx " " FMT_ucellx " mode " FMT_ucellx "\n",
				virt, phys, size, mode );
		ofmem_arch_unmap_pages(virt, size);
	}
	return 0;
}
345 
/* Marker proving s_retained survived a reset with valid contents. */
#define RETAIN_MAGIC	0x1100220033004400

/* Initialize ofmem: clear the static arena, record the detected RAM
   size, inherit the boot translations, map low RAM, and re-claim any
   physical ranges retained from before a reset so they are not handed
   out again. */
void ofmem_init( void )
{
	retain_t *retained = ofmem_arch_get_retained();
	int i;

	/* Note: s_retained is NOT part of s_ofmem_data, so it is left
	   intact here and inspected below. */
	memset(&s_ofmem_data, 0, sizeof(s_ofmem_data));
	s_ofmem_data.ofmem.ramsize = qemu_mem_size;

	/* inherit translations set up by entry.S */
	ofmem_walk_boot_map(remap_page_range);

        /* Map the memory */
        /* NOTE(review): identity-maps 8 MB starting at one page with mode
           0x36 -- presumably cacheable/writable TTE bits; confirm against
           the SPITFIRE_TTE_* definitions. */
        ofmem_map_page_range(PAGE_SIZE, PAGE_SIZE, 0x800000, 0x36);

	if (!(retained->magic == RETAIN_MAGIC)) {
		OFMEM_TRACE("ofmem_init: no retained magic found, creating\n");
		retained->magic = RETAIN_MAGIC;
		retained->numentries = 0;
	} else {
		/* NOTE(review): %lld assumes numentries is a signed long
		   long; verify against the retain_t declaration. */
		OFMEM_TRACE("ofmem_init: retained magic found, total %lld mappings\n", retained->numentries);

		/* Mark physical addresses as used so they are not reallocated */
		for (i = 0; i < retained->numentries; i++) {
			ofmem_claim_phys(retained->retain_phys_range[i].start,
				retained->retain_phys_range[i].size, 0);
		}

		/* Reset retained area for next reset */
		retained->magic = RETAIN_MAGIC;
		retained->numentries = 0;
	}
}
380