/* $Id: sun-mmu.c,v 1.12 2010/02/15 21:55:55 fredette Exp $ */

/* machine/sun/sun-mmu.c - classic Sun MMU emulation implementation: */

/*
 * Copyright (c) 2003 Matt Fredette
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Matt Fredette.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <tme/common.h>
_TME_RCSID("$Id: sun-mmu.c,v 1.12 2010/02/15 21:55:55 fredette Exp $");

/* includes: */
#include <tme/machine/sun.h>

/* macros: */
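/* the sizes of the ring of TLB tokens tracked per PMEG, and of the
   ring of TLBs to invalidate on a context switch, respectively: */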
#define TME_SUN_MMU_PMEG_TLBS	(16)
#define TME_SUN_MMU_CONTEXT_TLBS	(8)

/* structures: */

/* an allocated TLB set in a classic two-level Sun MMU: */
struct tme_sun_mmu_tlb_set {

  /* the next allocated TLB set: */
  struct tme_sun_mmu_tlb_set *tme_sun_mmu_tlb_set_next;

  /* the TLB set information: */
  struct tme_bus_tlb_set_info tme_sun_mmu_tlb_set_info;
};

/* one PMEG in a classic two-level Sun MMU: */
struct tme_sun_mmu_pmeg {

  /* the current list of TLBs using a page table entry in this PMEG, and
     the head within that list: */
  struct tme_token *tme_sun_mmu_pmeg_tlb_tokens[TME_SUN_MMU_PMEG_TLBS];
  unsigned int tme_sun_mmu_pmeg_tlbs_head;
};

/* the private structure for a classic two-level Sun MMU: */
struct tme_sun_mmu {

  /* the information provided by the user: */
  struct tme_sun_mmu_info tme_sun_mmu_info;
#define tme_sun_mmu_element tme_sun_mmu_info.tme_sun_mmu_info_element
#define tme_sun_mmu_address_bits tme_sun_mmu_info.tme_sun_mmu_info_address_bits
#define tme_sun_mmu_pgoffset_bits tme_sun_mmu_info.tme_sun_mmu_info_pgoffset_bits
#define tme_sun_mmu_pteindex_bits tme_sun_mmu_info.tme_sun_mmu_info_pteindex_bits
#define tme_sun_mmu_contexts tme_sun_mmu_info.tme_sun_mmu_info_contexts
#define tme_sun_mmu_pmegs_count tme_sun_mmu_info.tme_sun_mmu_info_pmegs
#define _tme_sun_mmu_tlb_fill_private tme_sun_mmu_info.tme_sun_mmu_info_tlb_fill_private
#define _tme_sun_mmu_tlb_fill tme_sun_mmu_info.tme_sun_mmu_info_tlb_fill

  /* if nonzero, this address space has a hole, and this has only the
     last true address bit set: */
  tme_uint32_t tme_sun_mmu_address_hole_bit;

  /* a PTE for addresses in the hole.  this is always all-bits-zero: */
  struct tme_sun_mmu_pte tme_sun_mmu_address_hole_pte;

  /* the number of bits in a segment number: */
  tme_uint8_t tme_sun_mmu_segment_bits;

  /* the segment map: */
  unsigned short *tme_sun_mmu_segment_map;

  /* the PMEGs: */
  struct tme_sun_mmu_pmeg *tme_sun_mmu_pmegs;

  /* the PTEs: */
  struct tme_sun_mmu_pte *tme_sun_mmu_ptes;

  /* the allocated TLB sets: */
  struct tme_sun_mmu_tlb_set *tme_sun_mmu_tlb_sets;

  /* the current list of TLBs that must be invalidated when the
     context changes: */
  struct tme_token *tme_sun_mmu_context_tlb_tokens[TME_SUN_MMU_CONTEXT_TLBS];
  unsigned int tme_sun_mmu_context_tlbs_head;
};

/* this creates a classic two-level Sun MMU: */
void *
tme_sun_mmu_new(struct tme_sun_mmu_info *info)
{
  struct tme_sun_mmu *mmu;
  unsigned int segmap_count;
  unsigned int segmap_i;

  /* allocate the new private structure: */
  mmu = tme_new0(struct tme_sun_mmu, 1);

  /* copy the user-provided information: */
  mmu->tme_sun_mmu_info = *info;

  /* if there is an address hole: */
  if (mmu->tme_sun_mmu_info.tme_sun_mmu_info_topindex_bits < 0) {

    /* there must be 32 address bits: */
    assert (mmu->tme_sun_mmu_address_bits == 32);

    /* adjust the number of address bits for the hole: */
    mmu->tme_sun_mmu_address_bits += (mmu->tme_sun_mmu_info.tme_sun_mmu_info_topindex_bits + 1);

    /* make the hole address bit: */
    mmu->tme_sun_mmu_address_hole_bit = TME_BIT(mmu->tme_sun_mmu_address_bits - 1);

    /* zero the number of top index bits: */
    mmu->tme_sun_mmu_info.tme_sun_mmu_info_topindex_bits = 0;
  }
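  /* (as an illustration only, with hypothetical numbers not tied to
     any particular machine: a tme_sun_mmu_info_topindex_bits of -5
     would leave 32 + (-5 + 1) == 28 true address bits, and a hole bit
     of TME_BIT(27) == 0x08000000; only addresses whose bits 31..27
     are all-zeros or all-ones would then lie outside the hole): */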

  /* allocate the segment map and initialize it to all invalid: */
  mmu->tme_sun_mmu_segment_bits = (mmu->tme_sun_mmu_address_bits
				   - (mmu->tme_sun_mmu_pteindex_bits
				      + mmu->tme_sun_mmu_pgoffset_bits));
  segmap_count = (mmu->tme_sun_mmu_contexts
		  * (1 << mmu->tme_sun_mmu_segment_bits));
  mmu->tme_sun_mmu_segment_map = tme_new(unsigned short, segmap_count);
  for (segmap_i = 0; segmap_i < segmap_count; segmap_i++) {
    mmu->tme_sun_mmu_segment_map[segmap_i] = mmu->tme_sun_mmu_pmegs_count - 1;
  }
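  /* ("invalid" here means that every entry initially points at the
     last PMEG (tme_sun_mmu_pmegs_count - 1), the same value that
     tme_sun_mmu_segmap_get below reports for addresses in the hole): */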

  /* allocate the PMEGs: */
  mmu->tme_sun_mmu_pmegs = tme_new0(struct tme_sun_mmu_pmeg, mmu->tme_sun_mmu_pmegs_count);

  /* allocate the PTEs: */
  mmu->tme_sun_mmu_ptes =
    tme_new0(struct tme_sun_mmu_pte,
	     mmu->tme_sun_mmu_pmegs_count
	     * (1 << mmu->tme_sun_mmu_pteindex_bits));

  /* done: */
  return (mmu);
}

/* given a context and an address, returns the segment map index and
   PTE: */
static unsigned short
_tme_sun_mmu_lookup(struct tme_sun_mmu *mmu, tme_uint8_t context, tme_uint32_t address,
		    struct tme_sun_mmu_pte **_pte)
{
  unsigned short pteindex;
  unsigned short segment;
  unsigned short segment_map_index;
  unsigned short pmeg;

  /* if there is an address hole, and this address is in it: */
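  /* (if there is no hole, the hole bit is zero, the mask below is
     zero, and this test can never succeed.  otherwise, when the hole
     bit is set in the address it is added in again, and the carry
     clears the top bits exactly when they are all-ones; so the masked
     sum is nonzero - the address is in the hole - unless the bits
     from the hole bit upward are all-zeros or all-ones): */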
  if (__tme_predict_false(((address
			    + (address & mmu->tme_sun_mmu_address_hole_bit))
			   & (((tme_uint32_t) 0)
			      - mmu->tme_sun_mmu_address_hole_bit)) != 0)) {

    /* return the hole PTE, and zero for the segment map index: */
    *_pte = &mmu->tme_sun_mmu_address_hole_pte;
    return (0);
  }
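  /* (the remaining address decomposes, from least to most significant,
     into the page offset, the PTE index within the PMEG, and the
     segment number.  with purely illustrative widths of 13 page offset
     bits, 4 PTE index bits, and 11 segment bits, the address
     0x00E25432 would yield page offset 0x1432, PTE index 0x2, and
     segment 0x71): */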

  /* lose the page offset bits: */
  address >>= mmu->tme_sun_mmu_pgoffset_bits;

  /* get the PTE index: */
  pteindex = (address
	      & (TME_BIT(mmu->tme_sun_mmu_pteindex_bits) - 1));
  address >>= mmu->tme_sun_mmu_pteindex_bits;

  /* get the segment number: */
  segment = (address
	     & (TME_BIT(mmu->tme_sun_mmu_segment_bits) - 1));

  /* get the segment map index: */
  segment_map_index = ((context << mmu->tme_sun_mmu_segment_bits)
		       | segment);

  /* get the PMEG: */
  pmeg = mmu->tme_sun_mmu_segment_map[segment_map_index];

  /* return the segment map index and the PTE: */
  *_pte = (mmu->tme_sun_mmu_ptes + (pmeg << mmu->tme_sun_mmu_pteindex_bits) + pteindex);
  return (segment_map_index);
}

/* this invalidates all TLB entries that may be affected by changes to
   a PMEG: */
static void
_tme_sun_mmu_pmeg_invalidate(struct tme_sun_mmu *mmu, unsigned short segment_map_index)
{
  struct tme_sun_mmu_pmeg *pmeg;
  int tlb_i;
  struct tme_token *token;

  /* get the PMEG: */
  pmeg = mmu->tme_sun_mmu_pmegs + mmu->tme_sun_mmu_segment_map[segment_map_index];

  /* invalidate all of the TLBs: */
  for (tlb_i = 0; tlb_i < TME_SUN_MMU_PMEG_TLBS; tlb_i++) {
    token = pmeg->tme_sun_mmu_pmeg_tlb_tokens[tlb_i];
    pmeg->tme_sun_mmu_pmeg_tlb_tokens[tlb_i] = NULL;
    if (token != NULL) {
      tme_token_invalidate(token);
    }
  }
}

/* this gets a PTE: */
int
tme_sun_mmu_pte_get(void *_mmu, tme_uint8_t context, tme_uint32_t address,
		    struct tme_sun_mmu_pte *_pte)
{
  struct tme_sun_mmu *mmu;
  unsigned short segment_map_index;
  struct tme_sun_mmu_pte *pte;

  /* lookup this address: */
  mmu = (struct tme_sun_mmu *) _mmu;
  segment_map_index = _tme_sun_mmu_lookup(mmu, context, address, &pte);

  /* copy the PTE: */
  *_pte = *pte;
  return (TME_OK);
}

/* this sets a PTE: */
int
tme_sun_mmu_pte_set(void *_mmu, tme_uint8_t context, tme_uint32_t address,
		    struct tme_sun_mmu_pte *_pte)
{
  struct tme_sun_mmu *mmu;
  unsigned short segment_map_index;
  struct tme_sun_mmu_pte *pte;

  /* lookup this address: */
  mmu = (struct tme_sun_mmu *) _mmu;
  segment_map_index = _tme_sun_mmu_lookup(mmu, context, address, &pte);
  if (__tme_predict_false(pte == &mmu->tme_sun_mmu_address_hole_pte)) {
    return (TME_OK);
  }
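  /* (an attempt to set a PTE for an address in the hole returns above
     without doing anything, so the hole PTE stays all-bits-zero and
     invalid): */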

  /* invalidate all TLB entries that are affected by changes to this PMEG: */
  _tme_sun_mmu_pmeg_invalidate(mmu, segment_map_index);

  /* otherwise, copy the PTE: */
  *pte = *_pte;
  return (TME_OK);
}

/* this gets a segment map entry: */
unsigned short
tme_sun_mmu_segmap_get(void *_mmu, tme_uint8_t context, tme_uint32_t address)
{
  struct tme_sun_mmu *mmu;
  struct tme_sun_mmu_pte *pte;
  unsigned short segment_map_index, pmeg;

  /* lookup this address: */
  mmu = (struct tme_sun_mmu *) _mmu;
  segment_map_index = _tme_sun_mmu_lookup(mmu, context, address, &pte);
  if (__tme_predict_false(pte == &mmu->tme_sun_mmu_address_hole_pte)) {
    return (mmu->tme_sun_mmu_pmegs_count - 1);
  }
  pmeg = mmu->tme_sun_mmu_segment_map[segment_map_index];
  tme_log(&mmu->tme_sun_mmu_element->tme_element_log_handle, 1000, TME_OK,
	  (&mmu->tme_sun_mmu_element->tme_element_log_handle,
	   "segmap_get: SEGMAP[%d:0x%08x] -> 0x%04x",
	   context,
	   address,
	   pmeg));
  return (pmeg);
}

/* this sets a segment map entry: */
void
tme_sun_mmu_segmap_set(void *_mmu, tme_uint8_t context, tme_uint32_t address, unsigned short pmeg)
{
  struct tme_sun_mmu *mmu;
  unsigned short segment_map_index;
  struct tme_sun_mmu_pte *pte;

  /* lookup this address: */
  mmu = (struct tme_sun_mmu *) _mmu;
  segment_map_index = _tme_sun_mmu_lookup(mmu, context, address, &pte);
  if (__tme_predict_false(pte == &mmu->tme_sun_mmu_address_hole_pte)) {
    return;
  }

  /* invalidate all TLB entries that are affected by changes to this
     PMEG - the old PMEG losing its spot in the segment map counts as
     such a change: */
  _tme_sun_mmu_pmeg_invalidate(mmu, segment_map_index);

  /* set the new segment: */
  mmu->tme_sun_mmu_segment_map[segment_map_index] = pmeg;
  tme_log(&mmu->tme_sun_mmu_element->tme_element_log_handle, 1000, TME_OK,
	  (&mmu->tme_sun_mmu_element->tme_element_log_handle,
	   "segmap_set: SEGMAP[%d:0x%08x] <- 0x%04x",
	   context,
	   address,
	   pmeg));
}

/* this fills a TLB entry: */
unsigned short
tme_sun_mmu_tlb_fill(void *_mmu, struct tme_bus_tlb *tlb,
		     tme_uint8_t context, tme_uint32_t address, unsigned short access)
{
  struct tme_sun_mmu *mmu;
  unsigned short segment_map_index;
  struct tme_sun_mmu_pte *pte;
  tme_bus_addr32_t addr_first, addr_last;
  unsigned short protection, protection_other, tlb_valid_for;
  tme_uint32_t physical_address;
  struct tme_sun_mmu_pmeg *pmeg;
  struct tme_bus_tlb tlb_virtual;
  struct tme_token *token_old;
  int tlb_i;

  /* the access must be a read or write by the system or user: */
  assert(access != 0
	 && (access == TME_SUN_MMU_PTE_PROT_SYSTEM(TME_SUN_MMU_PTE_PROT_RO)
	     || access == TME_SUN_MMU_PTE_PROT_SYSTEM(TME_SUN_MMU_PTE_PROT_RW)
	     || access == TME_SUN_MMU_PTE_PROT_USER(TME_SUN_MMU_PTE_PROT_RO)
	     || access == TME_SUN_MMU_PTE_PROT_USER(TME_SUN_MMU_PTE_PROT_RW)));

  /* lookup this address: */
  mmu = (struct tme_sun_mmu *) _mmu;
  segment_map_index = _tme_sun_mmu_lookup(mmu, context, address, &pte);
  addr_first = (address & ~(TME_BIT(mmu->tme_sun_mmu_pgoffset_bits) - 1));
  addr_last = (address | (TME_BIT(mmu->tme_sun_mmu_pgoffset_bits) - 1));

  /* remember this TLB entry in the PMEG: */
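  /* (each PMEG tracks at most TME_SUN_MMU_PMEG_TLBS tokens in a small
     ring; when a slot is about to be reused, any different token still
     in it is invalidated immediately, which keeps the bookkeeping
     bounded at the cost of occasionally invalidating a TLB entry that
     is still usable): */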
  if (__tme_predict_true(pte != &mmu->tme_sun_mmu_address_hole_pte)) {
    pmeg = mmu->tme_sun_mmu_pmegs + mmu->tme_sun_mmu_segment_map[segment_map_index];
    tlb_i = pmeg->tme_sun_mmu_pmeg_tlbs_head;
    token_old = pmeg->tme_sun_mmu_pmeg_tlb_tokens[tlb_i];
    if (token_old != NULL
	&& token_old != tlb->tme_bus_tlb_token) {
      tme_token_invalidate(token_old);
    }
    pmeg->tme_sun_mmu_pmeg_tlb_tokens[tlb_i]
      = tlb->tme_bus_tlb_token;
    pmeg->tme_sun_mmu_pmeg_tlbs_head = (tlb_i + 1) & (TME_SUN_MMU_PMEG_TLBS - 1);
  }

  /* if this page is invalid, return the page-invalid cycle handler,
     which is valid for reading and writing for the user and system: */
  if (!(pte->tme_sun_mmu_pte_flags & TME_SUN_MMU_PTE_VALID)) {
    tme_bus_tlb_initialize(tlb);
    tlb->tme_bus_tlb_addr_first = addr_first;
    tlb->tme_bus_tlb_addr_last = addr_last;
    tlb->tme_bus_tlb_cycles_ok = TME_BUS_CYCLE_READ | TME_BUS_CYCLE_WRITE;
    tlb->tme_bus_tlb_cycle_private = mmu->tme_sun_mmu_info.tme_sun_mmu_info_invalid_private;
    tlb->tme_bus_tlb_cycle = mmu->tme_sun_mmu_info.tme_sun_mmu_info_invalid;
    return (TME_SUN_MMU_TLB_SYSTEM | TME_SUN_MMU_TLB_USER);
  }

  /* otherwise, this page is valid.  get the relevant part of the
     protection for this accessor (system or user), the part of the
     protection covering the other accessor (system or user), adjust
     "access" to be an unshifted TME_SUN_MMU_PTE_PROT_ value, and get
     the accessor (user or system) that this TLB entry will definitely
     be valid for: */
  protection = pte->tme_sun_mmu_pte_flags;
  if (access & TME_SUN_MMU_PTE_PROT_SYSTEM(TME_SUN_MMU_PTE_PROT_MASK)) {
    protection_other = protection / TME_SUN_MMU_PTE_PROT_USER(1);
    access /= TME_SUN_MMU_PTE_PROT_SYSTEM(1);
    protection /= TME_SUN_MMU_PTE_PROT_SYSTEM(1);
    tlb_valid_for = TME_SUN_MMU_TLB_SYSTEM;
  }
  else {
    protection_other = protection / TME_SUN_MMU_PTE_PROT_SYSTEM(1);
    access /= TME_SUN_MMU_PTE_PROT_USER(1);
    protection /= TME_SUN_MMU_PTE_PROT_USER(1);
    tlb_valid_for = TME_SUN_MMU_TLB_USER;
  }

  /* NB that the following code assumes a particular ordering of
     TME_SUN_MMU_PTE_PROT_ values.  specifically, it assumes that
     ABORT < ERROR < RO < RW: */
  /* if the part of the protection covering the other accessor (system
     or user) allows at least as much access as the relevant part of
     the protection, this TLB entry will be valid for that other
     accessor as well.  we rely on particular definitions of the
     TME_SUN_MMU_TLB_ macros to make this fast: */
#if (3 - TME_SUN_MMU_TLB_SYSTEM) != TME_SUN_MMU_TLB_USER
#error "TME_SUN_MMU_TLB_USER and TME_SUN_MMU_TLB_SYSTEM are incompatible"
#endif
  protection &= TME_SUN_MMU_PTE_PROT_MASK;
  protection_other &= TME_SUN_MMU_PTE_PROT_MASK;
  if (protection_other >= protection) {
    tlb_valid_for |= (3 - tlb_valid_for);
  }
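  /* (for illustration only - the actual field layout is defined by the
     TME_SUN_MMU_PTE_PROT_ macros in <tme/machine/sun.h> - a system
     read of a page whose PTE grants the system read/write and the user
     read-only arrives here with access == TME_SUN_MMU_PTE_PROT_RO,
     protection == TME_SUN_MMU_PTE_PROT_RW and protection_other ==
     TME_SUN_MMU_PTE_PROT_RO, and since RO < RW the entry stays valid
     for the system only.  when the other accessor is allowed at least
     as much, the "3 -" trick above sets both TME_SUN_MMU_TLB_SYSTEM
     and TME_SUN_MMU_TLB_USER): */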

  /* if the access is protected, return the protection-error cycle
     handler: */
  if (protection < access) {
    if (protection == TME_SUN_MMU_PTE_PROT_ABORT) {
      abort();
    }
    tme_bus_tlb_initialize(tlb);
    tlb->tme_bus_tlb_addr_first = addr_first;
    tlb->tme_bus_tlb_addr_last = addr_last;
    tlb->tme_bus_tlb_cycles_ok = (TME_BUS_CYCLE_WRITE
				  | (protection == TME_SUN_MMU_PTE_PROT_ERROR
				     ? TME_BUS_CYCLE_READ
				     : 0));
    tlb->tme_bus_tlb_cycle_private = mmu->tme_sun_mmu_info.tme_sun_mmu_info_proterr_private;
    tlb->tme_bus_tlb_cycle = mmu->tme_sun_mmu_info.tme_sun_mmu_info_proterr;
    return (tlb_valid_for);
  }

  /* this access is OK.  fill the TLB with physical bus information.
     we pass in the virtual address as the initial physical address
     because sometimes the virtual part of the address can influence
     the physical address (as in the Sun-2 PROM mapping): */
  physical_address = address;
  (*mmu->_tme_sun_mmu_tlb_fill)
    (mmu->_tme_sun_mmu_tlb_fill_private,
     tlb,
     pte,
     &physical_address,
     ((access == TME_SUN_MMU_PTE_PROT_RW)
      ? TME_BUS_CYCLE_WRITE
      : TME_BUS_CYCLE_READ));

  /* create the mapping TLB entry, and update the PTE flags: */
  tlb_virtual.tme_bus_tlb_addr_first = addr_first;
  tlb_virtual.tme_bus_tlb_addr_last = addr_last;
  pte->tme_sun_mmu_pte_flags |= TME_SUN_MMU_PTE_REF;
  tlb_virtual.tme_bus_tlb_cycles_ok = TME_BUS_CYCLE_READ;
  if (access == TME_SUN_MMU_PTE_PROT_RW) {
    pte->tme_sun_mmu_pte_flags |= TME_SUN_MMU_PTE_MOD;
  }
  if (protection == TME_SUN_MMU_PTE_PROT_RW
      && (pte->tme_sun_mmu_pte_flags & TME_SUN_MMU_PTE_MOD)) {
    tlb_virtual.tme_bus_tlb_cycles_ok |= TME_BUS_CYCLE_WRITE;
  }
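  /* (note that writes are enabled in the TLB entry only when the page
     is both writable and already marked modified; a write to a clean
     page must come back through this fill, which is how
     TME_SUN_MMU_PTE_MOD gets set): */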

  /* map the filled TLB entry: */
  tme_bus_tlb_map(tlb, physical_address, &tlb_virtual, address);

  /* return who this TLB entry is good for: */
  return (tlb_valid_for);
}

/* this invalidates all TLB entries in all TLB sets: */
void
tme_sun_mmu_tlbs_invalidate(void *_mmu)
{
  struct tme_sun_mmu *mmu;
  struct tme_sun_mmu_tlb_set *tlb_set;

  /* recover our MMU: */
  mmu = (struct tme_sun_mmu *) _mmu;

  /* invalidate all TLB entries in all sets: */
  for (tlb_set = mmu->tme_sun_mmu_tlb_sets;
       tlb_set != NULL;
       tlb_set = tlb_set->tme_sun_mmu_tlb_set_next) {
    tme_bus_tlb_set_invalidate(&tlb_set->tme_sun_mmu_tlb_set_info);
  }
}

/* this adds a TLB entry as dependent on the current context: */
void
tme_sun_mmu_context_add(void *_mmu,
			const struct tme_bus_tlb *tlb)
{
  struct tme_sun_mmu *mmu;
  tme_uint32_t address;
  tme_uint32_t segment_bits;
  tme_uint32_t segments_per_context;
  signed long segment_map_index;
  tme_uint32_t pmeg;
  unsigned long tlb_i;
  struct tme_token *token_old;

  /* recover our MMU: */
  mmu = (struct tme_sun_mmu *) _mmu;

  /* get the address used with the MMU: */
  /* NB: if there is an address hole, this address could be in it.  if
     it is, it doesn't matter if we decide that this TLB entry is
     valid in all contexts or not - this TLB entry never needs to be
     invalidated because of a context change: */
  address = tlb->tme_bus_tlb_addr_first;

  /* get the number of bits in a segment number: */
  segment_bits = mmu->tme_sun_mmu_segment_bits;

  /* get the number of segment map entries per context: */
  segments_per_context = 1 << segment_bits;

  /* get the segment map index for this address in the last
     context: */
  segment_map_index
    = (((address
	 >> (mmu->tme_sun_mmu_pgoffset_bits
	     + mmu->tme_sun_mmu_pteindex_bits))
	& (segments_per_context - 1))
       + ((mmu->tme_sun_mmu_contexts - 1)
	  << segment_bits));

  /* get the PMEG for this address in the last context: */
  pmeg = mmu->tme_sun_mmu_segment_map[segment_map_index];

  /* there must be at least two contexts: */
  assert (mmu->tme_sun_mmu_contexts >= 2);
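  /* (the walk below compares the PMEG for this address in every other
     context against the one in the last context; segment_map_index is
     signed, so the loop can stop once it steps below context 0): */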

  /* loop over the segment map indices for this address in all other
     contexts: */
  segment_map_index -= segments_per_context;
  do {

    /* if the PMEG for this address in this context is different: */
    if (__tme_predict_false(mmu->tme_sun_mmu_segment_map[segment_map_index] != pmeg)) {

      /* this address doesn't have the same mapping in all contexts,
	 so we must invalidate this TLB entry when the context
	 changes: */
      tlb_i = mmu->tme_sun_mmu_context_tlbs_head;
      token_old = mmu->tme_sun_mmu_context_tlb_tokens[tlb_i];
      if (token_old != NULL
	  && token_old != tlb->tme_bus_tlb_token) {
	tme_token_invalidate(token_old);
      }
      mmu->tme_sun_mmu_context_tlb_tokens[tlb_i] = tlb->tme_bus_tlb_token;
      mmu->tme_sun_mmu_context_tlbs_head = (tlb_i + 1) % TME_SUN_MMU_CONTEXT_TLBS;

      return;
    }

  } while ((segment_map_index -= segments_per_context) >= 0);

  /* this address has the same mapping in all contexts, so we don't
     need to invalidate this TLB entry when the context changes: */
}

/* this is called after a context switch, to invalidate TLB entries
   that were dependent on the previous context: */
void
tme_sun_mmu_context_switched(void *_mmu)
{
  struct tme_sun_mmu *mmu;
  signed long tlb_i;
  struct tme_token *token;

  /* recover our MMU: */
  mmu = (struct tme_sun_mmu *) _mmu;

  /* invalidate all of the TLBs that depended on the previous
     context: */
  tlb_i = TME_SUN_MMU_CONTEXT_TLBS - 1;
  do {
    token = mmu->tme_sun_mmu_context_tlb_tokens[tlb_i];
    mmu->tme_sun_mmu_context_tlb_tokens[tlb_i] = NULL;
    if (token != NULL) {
      tme_token_invalidate(token);
    }
  } while (--tlb_i >= 0);
}

/* this adds a new TLB set: */
int
tme_sun_mmu_tlb_set_add(void *_mmu,
			struct tme_bus_tlb_set_info *tlb_set_info)
{
  struct tme_sun_mmu *mmu;
  struct tme_sun_mmu_tlb_set *tlb_set;

  /* recover our MMU: */
  mmu = (struct tme_sun_mmu *) _mmu;

  /* remember this set: */
  tlb_set = tme_new0(struct tme_sun_mmu_tlb_set, 1);
  tlb_set->tme_sun_mmu_tlb_set_next = mmu->tme_sun_mmu_tlb_sets;
  tlb_set->tme_sun_mmu_tlb_set_info = *tlb_set_info;
  mmu->tme_sun_mmu_tlb_sets = tlb_set;

  return (TME_OK);
}