/* $Id: stp222x-iommu.c,v 1.3 2010/06/05 18:59:29 fredette Exp $ */

/* ic/stp222x-iommu.c - emulation of the IOMMU of the UPA to SBus
   interface controller (STP2220) and the UPA to PCI interface
   controller (STP2222): */

/*
 * Copyright (c) 2009 Matt Fredette
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Matt Fredette.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <tme/common.h>
_TME_RCSID("$Id: stp222x-iommu.c,v 1.3 2010/06/05 18:59:29 fredette Exp $");

/* includes: */
#include "stp222x-impl.h"

/* macros: */

/* IOMMU register offsets: */
#define TME_STP222X_IOMMU_REGGROUP_INDEX_CR		TME_STP222X_REGGROUP_INDEX(0x00)
#define TME_STP222X_IOMMU_REGGROUP_INDEX_TSB		TME_STP222X_REGGROUP_INDEX(0x08)
#define TME_STP222X_IOMMU_REGGROUP_INDEX_FLUSH		TME_STP222X_REGGROUP_INDEX(0x10)
#define TME_STP222X_IOMMU_REGGROUP_INDEX_DIAG4_VA	TME_STP222X_REGGROUP_INDEX(0x00)
#define TME_STP222X_IOMMU_REGGROUP_INDEX_DIAG4_COMPARE	TME_STP222X_REGGROUP_INDEX(0x08)

/* the IOMMU control register: */
#define TME_STP222X_IOMMU_CR_XLT_ERR_MASK		((2 << 26) - (1 << 25))
#define TME_STP222X_IOMMU_CR_XLT_ERR			(1 << 24)
#define TME_STP222X_IOMMU_CR_LRU_LCKEN			(1 << 23)
#define TME_STP222X_IOMMU_CR_LRU_LCKPTR			((2 << 22) - (1 << 19))
#define TME_STP222X_IOMMU_CR_TSB_SIZE_MASK		((2 << 18) - (1 << 16))
#define TME_STP222X_IOMMU_CR_TBW_SIZE_MASK		(1 << 2)
#define  TME_STP222X_IOMMU_CR_TBW_SIZE_64KB		 (1 << 2)
#define  TME_STP222X_IOMMU_CR_TBW_SIZE_8KB		 (0 << 2)
#define TME_STP222X_IOMMU_CR_MMU_DE			(1 << 1)
#define TME_STP222X_IOMMU_CR_MMU_EN			(1 << 0)
#define TME_STP222X_IOMMU_CR_MBZ			\
  (((((tme_uint32_t) 2) << 31) - (1 << 27))		\
   | ((2 << 15) - (1 << 3)))

/* an IOMMU TTE: */
#define TME_STP222X_IOMMU_TTE_DATA_V			(((tme_uint64_t) 1) << 63)
#define TME_STP222X_IOMMU_TTE_DATA_SIZE_MASK		(((tme_uint64_t) 1) << 61)
#define  TME_STP222X_IOMMU_TTE_DATA_SIZE_64KB		 (((tme_uint64_t) 1) << 61)
#define  TME_STP222X_IOMMU_TTE_DATA_SIZE_8KB		 (((tme_uint64_t) 0) << 61)
#define TME_STP222X_IOMMU_TTE_STREAM			(((tme_uint64_t) 1) << 60)
#define TME_STP2220_IOMMU_TTE_LOCALBUS			(((tme_uint64_t) 1) << 59)
#define TME_STP222X_IOMMU_TTE_DATA_PA			((((tme_uint64_t) 2) << 40) - (1 << 13))
#define TME_STP222X_IOMMU_TTE_CACHEABLE			(1 << 4)
#define TME_STP222X_IOMMU_TTE_DATA_W			(1 << 1)

/* an IOMMU TLB data: */
#define TME_STP222X_IOMMU_TLB_DATA_V			(1 << 30)
#define TME_STP2220_IOMMU_TLB_DATA_LOCAL		(1 << 29)
#define TME_STP222X_IOMMU_TLB_DATA_C			(1 << 28)
#define TME_STP222X_IOMMU_TLB_DATA_PPN			((2 << 27) - (1 << 0))

/* an IOMMU TLB tag: */
#define TME_STP222X_IOMMU_TLB_TAG_ERROR_MASK		((2 << 24) - (1 << 23))
#define  TME_STP222X_IOMMU_TLB_TAG_ERROR_PROTECTION	 (0 << 23)
#define  TME_STP222X_IOMMU_TLB_TAG_ERROR_INVALID	 (1 << 23)
#define  TME_STP222X_IOMMU_TLB_TAG_ERROR_TIMEOUT	 (2 << 23)
#define  TME_STP222X_IOMMU_TLB_TAG_ERROR_ECC_UE		 (3 << 23)
#define TME_STP222X_IOMMU_TLB_TAG_ERROR			(1 << 22)
#define TME_STP222X_IOMMU_TLB_TAG_W			(1 << 21)
#define TME_STP222X_IOMMU_TLB_TAG_S			(1 << 20)
#define TME_STP222X_IOMMU_TLB_TAG_SIZE_MASK		(1 << 19)
#define  TME_STP222X_IOMMU_TLB_TAG_SIZE_64KB		 (1 << 19)
#define  TME_STP222X_IOMMU_TLB_TAG_SIZE_8KB		 (0 << 19)
#define TME_STP222X_IOMMU_TLB_TAG_VPN			((2 << 18) - (1 << 0))

/* _tme_stp222x_iommu_tlb_mash() returns a mash of TLB tag and TLB
   data information.  the non-page-number fields of tag and data don't
   overlap, and we don't include the page numbers, which leaves bits
   left over.  we use these bits for additional information: */
/* NB: the specific values for TME_STP222X_IOMMU_TLB_MASH_UPA_* are
   chosen to make it easy to quickly generate the corresponding
   address mask: */
#define TME_STP222X_IOMMU_TLB_MASH_UPA_41		(1 << 27)
#define TME_STP222X_IOMMU_TLB_MASH_UPA_31		(1 << 17)
#define TME_STP222X_IOMMU_TLB_MASH_FIXED		(1 << 16)
#define TME_STP222X_IOMMU_TLB_MASH_INVALID_REQUEST	(1 << 15)
#define TME_STP222X_IOMMU_TLB_MASH_MISS			(1 << 14)
#define TME_STP222X_IOMMU_TLB_MASH_TLB_I(n)		(n)
#define TME_STP222X_IOMMU_TLB_MASH_TLB_I_WHICH(x)	((x) % TME_STP222X_IOMMU_TLB_SIZE)
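/* NB: in the resulting mash, the TLB entry index lives in the low
   bits, the TME_STP222X_IOMMU_TLB_MASH_ flags live in bits 14..17 and
   bit 27, the TLB tag bits (SIZE, S, W, ERROR) live in bits 19..24,
   and the TLB data bits (C, LOCAL, V) live in bits 28..30, so none of
   these fields collide: */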

/* sizes: */
#define TME_STP222X_IOMMU_SIZE_LOG2_8KB			(13)
#define TME_STP222X_IOMMU_SIZE_LOG2_64KB		(16)
#define TME_STP222X_IOMMU_SIZE_8KB			(1 << TME_STP222X_IOMMU_SIZE_LOG2_8KB)
#define TME_STP222X_IOMMU_SIZE_64KB			(1 << TME_STP222X_IOMMU_SIZE_LOG2_64KB)

/* this returns the log2 of the TBW size: */
static tme_uint32_t
_tme_stp222x_iommu_tbw_size_log2(tme_uint32_t iommu_cr)
{
  tme_uint32_t tbw_size;

  /* shift TBW_SIZE down to bit zero and clear all other bits: */
  tbw_size
    = ((iommu_cr
	/ TME_STP222X_IOMMU_CR_TBW_SIZE_MASK)
       & 1);

  /* multiply this by the difference between log2(64KB) and log2(8KB),
     and then add log2(8KB).  if TBW_SIZE was set, the result will be
     log2(64KB), otherwise it will be log2(8KB): */
  return ((tbw_size
	   * (TME_STP222X_IOMMU_SIZE_LOG2_64KB
	      - TME_STP222X_IOMMU_SIZE_LOG2_8KB))
	  + TME_STP222X_IOMMU_SIZE_LOG2_8KB);
}

/* this returns the VPN mask for a TLB entry: */
static tme_uint32_t
_tme_stp222x_iommu_tlb_tag_vpn_mask(tme_uint32_t tlb_tag)
{
  tme_uint32_t tlb_tag_vpn_mask;

  /* copy the TLB entry tag into the TLB tag VPN mask, and shift the
     page size bit down to bit zero and clear all other bits: */
  tlb_tag_vpn_mask
    = ((tlb_tag
	/ TME_STP222X_IOMMU_TLB_TAG_SIZE_64KB)
       & 1);

  /* add in (64KB / 8KB) - 1.  if the page size is 64KB, this will
     leave bits 0..2 zero and bit 3 set, otherwise the page size is
     8KB and this will leave bits 0..2 one and bit 3 clear: */
  tlb_tag_vpn_mask
    += ((TME_STP222X_IOMMU_SIZE_64KB
	 / TME_STP222X_IOMMU_SIZE_8KB)
	- 1);

  /* set bits 3..18 of the TLB tag VPN mask: */
  tlb_tag_vpn_mask
    |= (TME_STP222X_IOMMU_TLB_TAG_VPN
	- ((TME_STP222X_IOMMU_SIZE_64KB
	    / TME_STP222X_IOMMU_SIZE_8KB)
	   - 1));
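
  /* the mask is now bits 3..18 of the VPN for a 64KB page, or bits
     0..18 for an 8KB page: */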

  /* return the VPN mask: */
  return (tlb_tag_vpn_mask);
}

/* this returns the page size from a TLB tag: */
static tme_uint32_t
_tme_stp222x_iommu_tlb_page_size(tme_uint32_t tlb_tag)
{
  tme_uint32_t page_size;

  /* shift the page size bit down into bit 13 (8KB) and clear all
     other bits: */
#if TME_STP222X_IOMMU_TLB_TAG_SIZE_MASK < TME_STP222X_IOMMU_SIZE_8KB
#error "TME_STP222X_IOMMU_TLB_TAG_SIZE_MASK changed"
#endif
  page_size
    = ((tlb_tag
	/ (TME_STP222X_IOMMU_TLB_TAG_SIZE_MASK
	   / TME_STP222X_IOMMU_SIZE_8KB))
       & TME_STP222X_IOMMU_SIZE_8KB);

  /* add 64KB - 8KB to this value.  the result will be 64KB if the
     page size is 64KB, and (64KB - 8KB) if the page size is 8KB: */
#if TME_STP222X_IOMMU_TLB_TAG_SIZE_64KB != TME_STP222X_IOMMU_TLB_TAG_SIZE_MASK
#error "TME_STP222X_IOMMU_TLB_TAG_SIZE_ values changed"
#endif
  page_size
    += (TME_STP222X_IOMMU_SIZE_64KB
	- TME_STP222X_IOMMU_SIZE_8KB);
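  /* (64KB - 8KB is 0xe000, which has the 8KB bit set but not the
     64KB bit, so exactly one of those two bits survives the mask
     below): */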

  /* clear all other bits except for 64KB and 8KB.  only one of them
     will be set: */
  page_size
    &= (TME_STP222X_IOMMU_SIZE_64KB
	| TME_STP222X_IOMMU_SIZE_8KB);

  return (page_size);
}

/* this looks up an address in the IOMMU and returns a mash of TLB
   entry tag and data information: */
static tme_uint32_t
_tme_stp222x_iommu_tlb_mash(const struct tme_stp222x *stp222x,
			    tme_bus_addr64_t io_address,
			    unsigned int cycle_type)
{
  tme_uint32_t iommu_cr;
  tme_uint32_t io_space_size_log2;
  tme_uint32_t io_address_0_31;
  tme_uint32_t io_address_tag;
  unsigned long tlb_i;
  unsigned int tlb_count;
  tme_uint32_t tlb_tag;

  /* get the IOMMU control register: */
  iommu_cr = stp222x->tme_stp222x_iommu_cr;

  /* if this is an stp2220: */
  if (TME_STP222X_IS_2220(stp222x)) {

    /* if the IOMMU is disabled: */
    if ((iommu_cr & TME_STP222X_IOMMU_CR_MMU_EN) == 0) {

      /* XXX FIXME - what does the stp2220 do if the IOMMU is
	 disabled? */
      abort();
    }

    /* otherwise, this address is translated: */
    /* XXX FIXME - does the STP2220 IOMMU have any address ranges that
       are bypassed, even when the IOMMU is enabled? */
  }

  /* otherwise, this is an stp2222: */
  else {

    /* if this lookup is for a Dual Address Cycle: */
    if (FALSE) {

      /* if this is a bypass address: */
      if ((io_address >> 50) == 0x3fff) {
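	/* (i.e., bits 50..63 of the address are all ones, so the
	   address is 0xfffc000000000000 or above): */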

	/* bits 0..40 of the address become the UPA address.  if bit
	   40 is clear, the address is cacheable: */
	return ((io_address & (((tme_uint64_t) 1) << 40))
		? (TME_STP222X_IOMMU_TLB_MASH_FIXED
		   | TME_STP222X_IOMMU_TLB_MASH_UPA_41
		   | TME_STP222X_IOMMU_TLB_DATA_V
		   | TME_STP222X_IOMMU_TLB_TAG_W
		   | TME_STP222X_IOMMU_TLB_TAG_SIZE_8KB)
		: (TME_STP222X_IOMMU_TLB_MASH_FIXED
		   | TME_STP222X_IOMMU_TLB_MASH_UPA_41
		   | TME_STP222X_IOMMU_TLB_DATA_V
		   | TME_STP222X_IOMMU_TLB_DATA_C
		   | TME_STP222X_IOMMU_TLB_TAG_W
		   | TME_STP222X_IOMMU_TLB_TAG_SIZE_8KB));
      }

      /* otherwise, this address is local: */
      else {
	return (TME_STP222X_IOMMU_TLB_MASH_FIXED
		| TME_STP222X_IOMMU_TLB_DATA_V
		| TME_STP2220_IOMMU_TLB_DATA_LOCAL
		| TME_STP222X_IOMMU_TLB_TAG_W
		| TME_STP222X_IOMMU_TLB_TAG_SIZE_8KB);
      }
    }

    /* otherwise, this is a Single Address Cycle: */
    else {

      /* if this address is translated or passed-through: */
      if (io_address & (((tme_uint32_t) 1) << 31)) {

	/* if this address is passed-through, bits 0..30 of the
	   address become the cacheable UPA address: */
	if ((iommu_cr & TME_STP222X_IOMMU_CR_MMU_EN) == 0) {
	  return (TME_STP222X_IOMMU_TLB_MASH_FIXED
		  | TME_STP222X_IOMMU_TLB_MASH_UPA_31
		  | TME_STP222X_IOMMU_TLB_DATA_V
		  | TME_STP222X_IOMMU_TLB_TAG_W
		  | TME_STP222X_IOMMU_TLB_DATA_C
		  | TME_STP222X_IOMMU_TLB_TAG_SIZE_8KB);
	}

	/* otherwise, this address is translated: */
      }

      /* otherwise, this address is local: */
      else {
	return (TME_STP222X_IOMMU_TLB_MASH_FIXED
		| TME_STP222X_IOMMU_TLB_DATA_V
		| TME_STP2220_IOMMU_TLB_DATA_LOCAL
		| TME_STP222X_IOMMU_TLB_TAG_W
		| TME_STP222X_IOMMU_TLB_TAG_SIZE_8KB);
      }
    }
  }

  /* truncate the I/O address to 32 bits: */
  io_address_0_31 = io_address;

  /* get the size of the translatable I/O address space: */
  io_space_size_log2
    = (_tme_stp222x_iommu_tbw_size_log2(iommu_cr)
       + (10	/* log2(one TSB table size unit entry count) */
	  + TME_FIELD_MASK_EXTRACTU(iommu_cr, TME_STP222X_IOMMU_CR_TSB_SIZE_MASK)));
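  /* (for example, with an 8KB TBW and a TSB_SIZE of zero this is
     13 + 10 + 0 == 23, so only I/O addresses 0xff800000 and above
     are translatable): */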

  /* if this I/O address is not within the translatable space: */
  /* "Hardware does not prevent illegal combinations [of TSB_SIZ and
     TBW_SIZ] from being programmed.  If an illegal combination is
     programmed into the IOMMU, all translation requests will be
     rejected as invalid." */
  if (io_space_size_log2 >= 32
      || (io_address_0_31
	  < (0
	     - (((tme_uint32_t) 1) << io_space_size_log2)))) {

    /* this translation request is invalid: */
    return (TME_STP222X_IOMMU_TLB_MASH_FIXED
	    | TME_STP222X_IOMMU_TLB_MASH_INVALID_REQUEST
	    | TME_STP222X_IOMMU_TLB_TAG_SIZE_8KB);
  }

  /* convert the I/O address into something that can be easily
     compared to a TLB tag: */
  io_address_tag = io_address_0_31 / TME_STP222X_IOMMU_SIZE_8KB;

  /* loop over the TLB entries from most to least recently used: */
  tlb_i = stp222x->tme_stp222x_iommu_tlb_i_mru;
  tlb_count = TME_STP222X_IOMMU_TLB_SIZE;
  for (;;) {

    /* get this TLB entry tag: */
    tlb_tag = stp222x->tme_stp222x_iommu_tlb_tags[tlb_i];

    /* if we hit in this TLB entry: */
    if (((tlb_tag
	  ^ io_address_tag)
	 & _tme_stp222x_iommu_tlb_tag_vpn_mask(tlb_tag)) == 0) {

      /* this address hit in the TLB: */
      return (TME_STP222X_IOMMU_TLB_MASH_TLB_I(tlb_i)
	      | (tlb_tag
		 & (TME_STP222X_IOMMU_TLB_TAG_ERROR_MASK
		    | TME_STP222X_IOMMU_TLB_TAG_ERROR
		    | TME_STP222X_IOMMU_TLB_TAG_W
		    | TME_STP222X_IOMMU_TLB_TAG_S
		    | TME_STP222X_IOMMU_TLB_TAG_SIZE_MASK))
	      | (stp222x->tme_stp222x_iommu_tlb_data[tlb_i]
		 & (TME_STP222X_IOMMU_TLB_DATA_V
		    | TME_STP2220_IOMMU_TLB_DATA_LOCAL
		    | TME_STP222X_IOMMU_TLB_DATA_C)));
    }

    /* advance to the next TLB entry in the LRU list: */
    tlb_i = stp222x->tme_stp222x_iommu_lru_next(tlb_i);
    tlb_count--;
    assert ((tlb_count == 0) == (tlb_i == stp222x->tme_stp222x_iommu_tlb_i_mru));

    /* if we missed in all of the TLB entries: */
    if (__tme_predict_false(tlb_count == 0)) {

      /* this address missed in the TLB: */
      return (TME_STP222X_IOMMU_TLB_MASH_MISS
	      | TME_STP222X_IOMMU_TLB_TAG_SIZE_8KB);
    }
  }
  /* NOTREACHED */
}

/* this looks up an address in the IOMMU and returns a mash of TLB
   entry tag and data information and any slave connection index.  if
   there is a slave connection index, the address is converted to be
   connection-relative: */
static tme_uint32_t
_tme_stp222x_iommu_tlb_mash_slave(struct tme_bus_connection *io_conn_bus,
				  tme_bus_addr64_t *_io_to_slave_address,
				  unsigned int cycle_type,
				  tme_uint32_t *_slave_conn_index)
{
  struct tme_stp222x *stp222x;
  tme_uint32_t tlb_mash;
  unsigned long tlb_i;
  unsigned long tlb_i_next;
  unsigned long tlb_i_prev;
  tme_bus_addr64_t upa_address;
  tme_uint32_t page_size_m1;

  /* recover our data structure: */
  stp222x = io_conn_bus->tme_bus_connection.tme_connection_element->tme_element_private;

  /* look up this address in the IOMMU TLB: */
  tlb_mash = _tme_stp222x_iommu_tlb_mash(stp222x,
					 *_io_to_slave_address,
					 cycle_type);

  /* XXX FIXME - is the LRU only updated on valid translations? */

  /* if this address hit in the IOMMU TLB: */
  if ((tlb_mash
       & (TME_STP222X_IOMMU_TLB_MASH_FIXED
	  | TME_STP222X_IOMMU_TLB_MASH_MISS)) == 0) {

    /* if the hit TLB entry is not already the most recently used: */
    tlb_i = TME_STP222X_IOMMU_TLB_MASH_TLB_I_WHICH(tlb_mash);
    if (tlb_i != stp222x->tme_stp222x_iommu_tlb_i_mru) {

      /* remove this TLB entry from the LRU list: */
      tlb_i_next = stp222x->tme_stp222x_iommu_lru_next(tlb_i);
      tlb_i_prev = stp222x->tme_stp222x_iommu_lru_prev(tlb_i);
      stp222x->tme_stp222x_iommu_lru_next(tlb_i_prev) = tlb_i_next;
      stp222x->tme_stp222x_iommu_lru_prev(tlb_i_next) = tlb_i_prev;

      /* add this TLB entry in the LRU list before the old most
	 recently used TLB entry: */
      tlb_i_next = stp222x->tme_stp222x_iommu_tlb_i_mru;
      tlb_i_prev = stp222x->tme_stp222x_iommu_lru_prev(tlb_i_next);
      stp222x->tme_stp222x_iommu_lru_next(tlb_i_prev) = tlb_i;
      stp222x->tme_stp222x_iommu_lru_prev(tlb_i) = tlb_i_prev;
      stp222x->tme_stp222x_iommu_lru_next(tlb_i) = tlb_i_next;
      stp222x->tme_stp222x_iommu_lru_prev(tlb_i_next) = tlb_i;

      /* this TLB entry is now the most recently used: */
      stp222x->tme_stp222x_iommu_tlb_i_mru = tlb_i;
    }
  }

  /* if we don't have a valid translation for this address: */
  if ((tlb_mash & TME_STP222X_IOMMU_TLB_DATA_V) == 0) {

    /* return no connection index: */
    *_slave_conn_index = TME_STP222X_CONN_NULL;
    return (tlb_mash);
  }

  /* if this address is translated to itself, on its local bus: */
  if (tlb_mash & TME_STP2220_IOMMU_TLB_DATA_LOCAL) {

    /* look up this address on the connection's bus: */
    *_slave_conn_index
      = tme_stp222x_aspace_lookup(stp222x,
				  (TME_STP222X_IS_2220(stp222x)
				   ? TME_STP2220_ASPACE_SBUS
				   : ((io_conn_bus->tme_bus_connection.tme_connection_id
				       & TME_STP2222_CONNID_BUS_WHICH) == 0)
				   ? TME_STP2222_ASPACE_PCI_MEMORY(0)
				   : TME_STP2222_ASPACE_PCI_MEMORY(1)),
				  _io_to_slave_address);
  }

  /* otherwise, this address is not local: */
  else {

    /* if this address is translated directly into a UPA address: */
    if (tlb_mash
	& (TME_STP222X_IOMMU_TLB_MASH_UPA_41
	   | TME_STP222X_IOMMU_TLB_MASH_UPA_31)) {

      /* convert the address into the UPA address: */
#if (TME_STP222X_IOMMU_TLB_MASH_UPA_41 / TME_STP222X_IOMMU_TLB_MASH_UPA_31) != (1 << (41 - 31))
#error "TME_STP222X_IOMMU_TLB_MASH_ values changed"
#endif
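      /* (multiplying the TME_STP222X_IOMMU_TLB_MASH_UPA_ bit by
	 ((1 << 31) / TME_STP222X_IOMMU_TLB_MASH_UPA_31) turns it into
	 1 << 41 or 1 << 31, so subtracting one yields an address mask
	 of bits 0..40 or bits 0..30): */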
      upa_address = *_io_to_slave_address;
      upa_address
	&= (((tlb_mash
	      & (TME_STP222X_IOMMU_TLB_MASH_UPA_41
		 | TME_STP222X_IOMMU_TLB_MASH_UPA_31))
	     * ((((tme_bus_addr64_t) 1) << 31)
		/ TME_STP222X_IOMMU_TLB_MASH_UPA_31))
	    - 1);
    }

    /* otherwise, this address is translated through the TLB entry data
       into a UPA address: */
    else {

      /* get the page size mask: */
      page_size_m1 = _tme_stp222x_iommu_tlb_page_size(tlb_mash) - 1;

      /* convert the TLB entry data into the base UPA address: */
      upa_address
	= (stp222x->tme_stp222x_iommu_tlb_data[TME_STP222X_IOMMU_TLB_MASH_TLB_I_WHICH(tlb_mash)]
	   & TME_STP222X_IOMMU_TLB_DATA_PPN);
      upa_address *= TME_STP222X_IOMMU_SIZE_8KB;
      upa_address &= ~ (tme_bus_addr64_t) page_size_m1;

      /* add in the offset: */
      upa_address |= ((tme_bus_addr32_t) *_io_to_slave_address) & page_size_m1;
    }

    /* return the UPA bus and address (for both the direct and the
       TLB-data translations): */
    *_slave_conn_index = TME_STP222X_CONN_UPA;
    *_io_to_slave_address = upa_address;
  }

  return (tlb_mash);
}

/* this handles an IOMMU bus cycle: */
void
tme_stp222x_iommu_cycle(struct tme_bus_connection *master_conn_bus,
			struct tme_bus_cycle *master_cycle,
			tme_uint32_t *_master_fast_cycle_types,
			struct tme_completion *master_completion)
{
  struct tme_stp222x *stp222x;
  struct tme_bus_tlb *tsb_tlb;
  unsigned long tlb_i_must_hit;
  tme_bus_addr64_t slave_address;
  unsigned int slave_conn_index;
  tme_uint32_t tlb_mash;
  tme_uint32_t iommu_cr;
  tme_uint32_t tsb_index;
  tme_bus_addr64_t tsb_address;
  struct tme_bus_connection *slave_conn_bus;
  struct tme_bus_connection *slave_conn_bus_other;
  struct tme_bus_tlb tsb_tlb_local;
#if TME_STP22XX_BUS_TRANSITION
  int rc;
#endif /* TME_STP22XX_BUS_TRANSITION */
  tme_uint64_t tte;
  unsigned long tlb_i;
  struct tme_stp222x_tlb_list *tlb_list;
  signed long tlb_list_i;
  struct tme_token *token;
  tme_uint32_t tlb_data;
  tme_uint32_t tlb_tag;
  tme_uint32_t tlb_tag_error;

  /* enter: */
  stp222x = tme_stp222x_enter_master_bus(master_conn_bus);

  /* start this cycle: */
  assert (stp222x->tme_stp222x_master_completion == NULL);
  stp222x->tme_stp222x_master_completion = &master_completion;

  /* start out using the stored TSB TLB: */
  tsb_tlb = &stp222x->tme_stp222x_iommu_tsb_tlb;

  /* start out allowing the address to miss in the IOMMU TLB: */
  tlb_i_must_hit = TME_STP222X_IOMMU_TLB_SIZE;

  /* loop forever: */
  for (;;) {

    /* if this cycle has been aborted: */
    if (stp222x->tme_stp222x_master_completion != &master_completion) {
      tme_stp222x_leave(stp222x);
      return;
    }

    /* translate this address: */
    slave_address = master_cycle->tme_bus_cycle_address;
    tlb_mash = _tme_stp222x_iommu_tlb_mash_slave(master_conn_bus,
						 &slave_address,
						 master_cycle->tme_bus_cycle_type,
						 &slave_conn_index);

    /* if the translation for this address is valid: */
    if (tlb_mash & TME_STP222X_IOMMU_TLB_DATA_V) {

      /* stop now: */
      break;
    }

    /* if the translation could not miss in the IOMMU TLB: */
    if (tlb_i_must_hit < TME_STP222X_IOMMU_TLB_SIZE) {

      /* the translation must have hit the specified IOMMU TLB entry,
	 even though that TLB entry is apparently invalid: */
      assert ((tlb_mash & TME_STP222X_IOMMU_TLB_MASH_FIXED) == 0
	      && TME_STP222X_IOMMU_TLB_MASH_TLB_I_WHICH(tlb_mash) == tlb_i_must_hit);

      /* stop now: */
      break;
    }

    /* get the IOMMU control register: */
    iommu_cr = stp222x->tme_stp222x_iommu_cr;

    /* get the index in the TSB table to read: */
    tsb_index = master_cycle->tme_bus_cycle_address;
    tsb_index >>= _tme_stp222x_iommu_tbw_size_log2(iommu_cr);
    tsb_index
      &= ((2 <<
	   (22
	    + TME_FIELD_MASK_EXTRACTU(iommu_cr, TME_STP222X_IOMMU_CR_TSB_SIZE_MASK)
	    - TME_STP222X_IOMMU_SIZE_LOG2_8KB))
	  - 1);
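    /* (for example, with an 8KB TBW and a TSB_SIZE of zero, the TSB
       has 1024 entries and bits 13..22 of the I/O address select the
       TTE): */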

    /* get the address in the TSB table to read: */
    tsb_address = stp222x->tme_stp222x_iommu_tsb + (tsb_index * sizeof(tme_uint64_t));

    /* busy the TSB TLB: */
    tme_bus_tlb_busy(tsb_tlb);

    /* if the TSB TLB is invalid or doesn't apply: */
    if (tme_bus_tlb_is_invalid(tsb_tlb)
	|| tsb_address < (tme_bus_addr64_t) tsb_tlb->tme_bus_tlb_addr_first
	|| tsb_address > (tme_bus_addr64_t) tsb_tlb->tme_bus_tlb_addr_last) {

      /* force the TLB entry to be invalid, since after we clear its
	 invalid token, we may abort this cycle without storing it: */
      stp222x->tme_stp222x_iommu_tsb_tlb.tme_bus_tlb_addr_first = 1;
      stp222x->tme_stp222x_iommu_tsb_tlb.tme_bus_tlb_addr_last = 0;

      /* unbusy the TSB TLB for filling: */
      tme_bus_tlb_unbusy_fill(tsb_tlb);

      /* busy the UPA connection: */
      slave_conn_bus = tme_stp222x_slave_busy_bus(stp222x, TME_STP222X_CONN_UPA);

      /* leave: */
      tme_stp222x_leave(stp222x);

      /* fill the local TLB entry: */
      slave_conn_bus_other = (struct tme_bus_connection *) slave_conn_bus->tme_bus_connection.tme_connection_other;
      tsb_tlb_local.tme_bus_tlb_token = &stp222x->tme_stp222x_iommu_tsb_tlb_token;
#if TME_STP22XX_BUS_TRANSITION
      rc =
#endif /* TME_STP22XX_BUS_TRANSITION */
      (*slave_conn_bus_other->tme_bus_tlb_fill)
	(slave_conn_bus_other,
	 &tsb_tlb_local,
	 tsb_address,
	 TME_BUS_CYCLE_READ);
#if TME_STP22XX_BUS_TRANSITION
      assert (rc == TME_OK);
#endif /* TME_STP22XX_BUS_TRANSITION */

      /* reenter: */
      stp222x = tme_stp222x_enter_bus(master_conn_bus);

      /* unbusy the UPA connection: */
      tme_stp222x_slave_unbusy(stp222x);

      /* switch to using the newly filled local TLB entry: */
      tsb_tlb = &tsb_tlb_local;

      /* loop now, to make sure we're still running: */
      continue;
    }

    /* if we switched to the local TLB entry: */
    if (tsb_tlb == &tsb_tlb_local) {

      /* store the local TLB entry: */
      stp222x->tme_stp222x_iommu_tsb_tlb = tsb_tlb_local;
    }

    /* the TSB TLB must allow fast reading: */
    if (tsb_tlb->tme_bus_tlb_emulator_off_read == TME_EMULATOR_OFF_UNDEF) {
      abort();
    }

    /* read the TTE: */
    tte = tme_memory_bus_read64(((_tme_const tme_shared tme_uint64_t *)
				 (tsb_tlb->tme_bus_tlb_emulator_off_read
				  + tsb_address)),
				tsb_tlb->tme_bus_tlb_rwlock,
				sizeof(tme_uint64_t),
				sizeof(tme_uint64_t));
    tte = tme_betoh_u64(tte);

    /* unbusy the TSB TLB: */
    tme_bus_tlb_unbusy(tsb_tlb);

    /* if a TLB entry is already allocated to this address: */
    if ((tlb_mash & TME_STP222X_IOMMU_TLB_MASH_MISS) == 0) {

      /* replace the same TLB entry: */
      tlb_i = TME_STP222X_IOMMU_TLB_MASH_TLB_I_WHICH(tlb_mash);
    }

    /* otherwise, a TLB entry isn't already allocated to this address: */
    else {

      /* get the IOMMU control register: */
      iommu_cr = stp222x->tme_stp222x_iommu_cr;

      /* if the LRU is locked: */
      if (__tme_predict_false(iommu_cr & TME_STP222X_IOMMU_CR_LRU_LCKEN)) {

	/* replace the locked TLB entry: */
	tlb_i = TME_FIELD_MASK_EXTRACTU(iommu_cr, TME_STP222X_IOMMU_CR_LRU_LCKPTR);
      }

      /* otherwise, the LRU is not locked: */
      else {

	/* replace the least recently used TLB entry: */
	tlb_i = stp222x->tme_stp222x_iommu_lru_prev(stp222x->tme_stp222x_iommu_tlb_i_mru);
      }
    }

    /* invalidate all of the TLBs associated with this IOMMU TLB
       entry: */
    tlb_list = &stp222x->tme_stp222x_iommu_tlb_list[tlb_i];
    tlb_list_i = TME_STP222X_TLB_LIST_TOKENS_COUNT - 1;
    do {
      token = tlb_list->tme_stp222x_tlb_list_tokens[tlb_list_i];
      if (token != NULL) {
	tlb_list->tme_stp222x_tlb_list_tokens[tlb_list_i] = NULL;
	tme_token_invalidate(token);
      }
    } while (--tlb_list_i >= 0);

    /* make and store the TLB data: */
#if (TME_STP222X_IOMMU_TLB_DATA_PPN & 1) == 0
#error "TME_STP222X_IOMMU_TLB_DATA_PPN changed"
#endif
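    /* extracting TME_STP222X_IOMMU_TTE_DATA_PA yields the physical
       page number in 8KB units, which lands directly in
       TME_STP222X_IOMMU_TLB_DATA_PPN: */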
    tlb_data = TME_FIELD_MASK_EXTRACTU(tte, TME_STP222X_IOMMU_TTE_DATA_PA);
    if (tte & TME_STP222X_IOMMU_TTE_DATA_V) {
      tlb_data += TME_STP222X_IOMMU_TLB_DATA_V;
    }
    if (tte & TME_STP2220_IOMMU_TTE_LOCALBUS) {
      if (TME_STP222X_IS_2220(stp222x)) {
	tlb_data += TME_STP2220_IOMMU_TLB_DATA_LOCAL;
      }
    }
    if (tte & TME_STP222X_IOMMU_TTE_CACHEABLE) {
      tlb_data += TME_STP222X_IOMMU_TLB_DATA_C;
    }
    stp222x->tme_stp222x_iommu_tlb_data[tlb_i] = tlb_data;

    /* make and store the TLB tag: */
    tlb_tag = (tme_uint32_t) master_cycle->tme_bus_cycle_address;
    tlb_tag /= TME_STP222X_IOMMU_SIZE_8KB;
    if (tte & TME_STP222X_IOMMU_TTE_DATA_W) {
      tlb_tag += TME_STP222X_IOMMU_TLB_TAG_W;
    }
    if (tte & TME_STP222X_IOMMU_TTE_STREAM) {
      tlb_tag += TME_STP222X_IOMMU_TLB_TAG_S;
    }
    if (tte & TME_STP222X_IOMMU_TTE_DATA_SIZE_MASK) {
      tlb_tag += TME_STP222X_IOMMU_TLB_TAG_SIZE_MASK;
    }
    stp222x->tme_stp222x_iommu_tlb_tags[tlb_i] = tlb_tag;

    /* loop to look up this address in the IOMMU again.  this time, it
       must hit this entry: */
    tlb_i_must_hit = tlb_i;

#if TME_STP22XX_BUS_TRANSITION

    /* invalidate the last TLB that missed the IOMMU TLB (because we
       just filled the IOMMU TLB, we should get the master to refill
       its TLB): */
    if (stp222x->tme_stp222x_iommu_tlb_missed_token != NULL) {
      tme_token_invalidate(stp222x->tme_stp222x_iommu_tlb_missed_token);
      stp222x->tme_stp222x_iommu_tlb_missed_token = NULL;
    }

#endif /* TME_STP22XX_BUS_TRANSITION */
  }

  /* assume that there is no IOMMU error: */
  tlb_tag_error = !TME_STP222X_IOMMU_TLB_TAG_ERROR;

  /* if the address is not writable: */
  if (__tme_predict_false((tlb_mash & TME_STP222X_IOMMU_TLB_TAG_W) == 0)) {

    /* the master can't do fast writes: */
    *_master_fast_cycle_types &= ~TME_BUS_CYCLE_WRITE;

    /* if this is a write: */
    if (__tme_predict_false(master_cycle->tme_bus_cycle_type & TME_BUS_CYCLE_WRITE)) {

      /* there is an IOMMU protection error: */
      tlb_tag_error
	= (TME_STP222X_IOMMU_TLB_TAG_ERROR
	   + TME_STP222X_IOMMU_TLB_TAG_ERROR_PROTECTION);
    }
  }

  /* if we don't have a valid translation: */
  if (__tme_predict_false((tlb_mash & TME_STP222X_IOMMU_TLB_DATA_V) == 0)) {

    /* there is an IOMMU invalid error: */
    tlb_tag_error
      = (TME_STP222X_IOMMU_TLB_TAG_ERROR
	 + TME_STP222X_IOMMU_TLB_TAG_ERROR_INVALID);
  }

  /* if there is an IOMMU error: */
  if (tlb_tag_error != !TME_STP222X_IOMMU_TLB_TAG_ERROR) {

    /* if this isn't a fixed translation: */
    if ((tlb_mash & TME_STP222X_IOMMU_TLB_MASH_FIXED) == 0) {

      /* update the error in the tag for the TLB entry: */
      tlb_i = TME_STP222X_IOMMU_TLB_MASH_TLB_I_WHICH(tlb_mash);
      stp222x->tme_stp222x_iommu_tlb_tags[tlb_i]
	= ((stp222x->tme_stp222x_iommu_tlb_tags[tlb_i]
	    & ~(TME_STP222X_IOMMU_TLB_TAG_ERROR_MASK
		| TME_STP222X_IOMMU_TLB_TAG_ERROR))
	   + tlb_tag_error);
    }

    /* update the error in the IOMMU control register: */
#if (TME_STP222X_IOMMU_TLB_TAG_ERROR_MASK * (TME_STP222X_IOMMU_CR_XLT_ERR_MASK / TME_STP222X_IOMMU_TLB_TAG_ERROR_MASK)) != TME_STP222X_IOMMU_CR_XLT_ERR_MASK
#error "IOMMU error masks changed"
#endif
#if (TME_STP222X_IOMMU_TLB_TAG_ERROR * (TME_STP222X_IOMMU_CR_XLT_ERR_MASK / TME_STP222X_IOMMU_TLB_TAG_ERROR_MASK)) != TME_STP222X_IOMMU_CR_XLT_ERR
#error "IOMMU error flags changed"
#endif
    stp222x->tme_stp222x_iommu_cr
      = ((stp222x->tme_stp222x_iommu_cr
	  & ~(TME_STP222X_IOMMU_CR_XLT_ERR_MASK
	      | TME_STP222X_IOMMU_CR_XLT_ERR))
	 + (tlb_tag_error
	    * (TME_STP222X_IOMMU_CR_XLT_ERR_MASK
	       / TME_STP222X_IOMMU_TLB_TAG_ERROR_MASK)));

    /* force no connection: */
    slave_conn_index = TME_STP222X_CONN_NULL;
  }

  /* run the slave bus cycle: */
  master_cycle->tme_bus_cycle_address = slave_address;
  tme_stp22xx_slave_cycle(master_conn_bus,
			  slave_conn_index,
			  master_cycle,
			  _master_fast_cycle_types,
			  &master_completion);

  /* leave: */
  tme_stp222x_leave(stp222x);
}

/* this fills a TLB entry from the IOMMU: */
void
tme_stp222x_iommu_tlb_fill(struct tme_bus_connection *io_conn_bus,
			   struct tme_bus_tlb *tlb,
			   tme_bus_addr_t io_address_wider,
			   unsigned int cycle_type)
{
  struct tme_stp222x *stp222x;
  tme_bus_addr64_t slave_address;
  tme_uint32_t tlb_mash;
  tme_uint32_t slave_conn_index;
  struct tme_stp222x_tlb_list *tlb_list;
  unsigned int tlb_list_head;
  struct tme_token *token;
  struct tme_token *token_other;
  struct tme_bus_tlb tlb_mapping;
  tme_uint32_t page_size_m1;
  tme_bus_addr64_t io_address;

  /* enter: */
  stp222x = tme_stp222x_enter_bus(io_conn_bus);

  /* translate this address: */
  slave_address = io_address_wider;
  tlb_mash = _tme_stp222x_iommu_tlb_mash_slave(io_conn_bus,
					       &slave_address,
					       cycle_type,
					       &slave_conn_index);

  /* if the translation for this address is valid: */
  if (tlb_mash & TME_STP222X_IOMMU_TLB_DATA_V) {

    /* if this translation is fixed: */
    if (tlb_mash & TME_STP222X_IOMMU_TLB_MASH_FIXED) {

      /* track this TLB entry in the fixed list: */
      tlb_list = &stp222x->tme_stp222x_iommu_tlb_list_fixed;
    }

    /* otherwise, this translation is not fixed: */
    else {

      /* track this TLB entry in the list for the IOMMU TLB entry: */
      tlb_list = &stp222x->tme_stp222x_iommu_tlb_list[TME_STP222X_IOMMU_TLB_MASH_TLB_I_WHICH(tlb_mash)];
    }

    /* track this TLB entry: */
    tlb_list_head = tlb_list->tme_stp222x_tlb_list_head;
    token = tlb->tme_bus_tlb_token;
    token_other = tlb_list->tme_stp222x_tlb_list_tokens[tlb_list_head];
    tlb_list->tme_stp222x_tlb_list_tokens[tlb_list_head] = token;
    tlb_list->tme_stp222x_tlb_list_head = (tlb_list_head + 1) % TME_STP222X_TLB_LIST_TOKENS_COUNT;
    if (token_other != NULL
	&& token_other != token) {
      tme_token_invalidate(token_other);
    }
  }

  /* otherwise, the translation for this address is not valid: */
  else {

    /* we must not have any connection for this address: */
    assert (slave_conn_index == TME_STP222X_CONN_NULL);

#if TME_STP22XX_BUS_TRANSITION

    /* track the TLB for the most recent address that didn't have a
       valid translation, so we can invalidate it when the slow cycle
       happens (assuming the slow cycle will fill the IOMMU TLB with a
       valid translation, we want the slave to then refill its
       TLB): */
    token = tlb->tme_bus_tlb_token;
    token_other = stp222x->tme_stp222x_iommu_tlb_missed_token;
    stp222x->tme_stp222x_iommu_tlb_missed_token = token;
    if (token_other != NULL
	&& token_other != token) {
      tme_token_invalidate(token_other);
    }

#endif /* TME_STP22XX_BUS_TRANSITION */
  }

  /* fill the TLB entry: */
  tme_stp22xx_tlb_fill(io_conn_bus,
		       tlb,
		       slave_conn_index,
		       slave_address,
		       cycle_type);

  /* leave: */
  tme_stp222x_leave(stp222x);

  /* get the page size mask: */
  page_size_m1 = _tme_stp222x_iommu_tlb_page_size(tlb_mash) - 1;

  /* map the filled TLB entry: */
  io_address = ~ (tme_bus_addr64_t) page_size_m1;
  io_address &= io_address_wider;
  tlb_mapping.tme_bus_tlb_addr_first = io_address;
  io_address |= page_size_m1;
  tlb_mapping.tme_bus_tlb_addr_last = io_address;
#if TME_STP22XX_BUS_TRANSITION
  tlb_mapping.tme_bus_tlb_cycles_ok = (TME_BUS_CYCLE_READ | TME_BUS_CYCLE_WRITE);
#endif /* TME_STP22XX_BUS_TRANSITION */
  tme_bus_tlb_map(tlb, slave_address, &tlb_mapping, io_address_wider);
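  /* the filled TLB entry now covers at most the single IOMMU page
     that contains the original I/O address: */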

  /* if this address is not writable: */
  if ((tlb_mash & TME_STP222X_IOMMU_TLB_TAG_W) == 0) {

    /* this TLB entry doesn't support writes: */
    tlb->tme_bus_tlb_emulator_off_write = TME_EMULATOR_OFF_UNDEF;
#if TME_STP22XX_BUS_TRANSITION
    tlb->tme_bus_tlb_cycles_ok &= ~TME_BUS_CYCLE_WRITE;
#endif /* TME_STP22XX_BUS_TRANSITION */
  }
}

/* the IOMMU register handler: */
void
tme_stp222x_iommu_regs(struct tme_stp222x *stp222x,
		       struct tme_stp222x_reg *reg)
{
  tme_uint32_t reggroup_index;
  const char *name;
  tme_uint32_t io_address;
  tme_uint32_t tlb_mash;
  tme_uint32_t tlb_i;
  struct tme_stp222x_tlb_list *tlb_list;
  signed long tlb_list_i;
  struct tme_token *token;

  /* get the register: */
  reggroup_index = TME_STP222X_REGGROUP_INDEX(reg->tme_stp222x_reg_address);

  /* if this is a write: */
  if (reg->tme_stp222x_reg_write) {

    /* dispatch on the register: */
    switch (reggroup_index) {

    case TME_STP222X_IOMMU_REGGROUP_INDEX_CR:
      stp222x->tme_stp222x_iommu_cr
	= (reg->tme_stp222x_reg_value
	   & ~TME_STP222X_IOMMU_CR_MBZ);
      name = "CR";
      break;

    case TME_STP222X_IOMMU_REGGROUP_INDEX_TSB:
      stp222x->tme_stp222x_iommu_tsb
	= (reg->tme_stp222x_reg_value
	   & ((((tme_uint64_t) 2) << 40)
	      - (1 << 13)));
      name = "TSB";
      break;

    case TME_STP222X_IOMMU_REGGROUP_INDEX_FLUSH:

      /* get the address: */
      io_address = 0 - (tme_uint32_t) TME_STP222X_IOMMU_SIZE_8KB;
      io_address &= reg->tme_stp222x_reg_value;

      /* translate this address: */
      tlb_mash
	= _tme_stp222x_iommu_tlb_mash(stp222x,
				      io_address,
				      TME_BUS_CYCLE_READ);

      /* if this address hit an IOMMU TLB entry: */
      if ((tlb_mash
	   & (TME_STP222X_IOMMU_TLB_MASH_FIXED
	      | TME_STP222X_IOMMU_TLB_MASH_MISS)) == 0) {

	/* invalidate this entry: */
	tlb_i = TME_STP222X_IOMMU_TLB_MASH_TLB_I_WHICH(tlb_mash);
	stp222x->tme_stp222x_iommu_tlb_data[tlb_i] &= ~TME_STP222X_IOMMU_TLB_DATA_V;

	/* invalidate all of the TLBs associated with this IOMMU TLB
	   entry: */
	tlb_list = &stp222x->tme_stp222x_iommu_tlb_list[tlb_i];
	tlb_list_i = TME_STP222X_TLB_LIST_TOKENS_COUNT - 1;
	do {
	  token = tlb_list->tme_stp222x_tlb_list_tokens[tlb_list_i];
	  if (token != NULL) {
	    tlb_list->tme_stp222x_tlb_list_tokens[tlb_list_i] = NULL;
	    tme_token_invalidate(token);
	  }
	} while (--tlb_list_i >= 0);
      }
      name = "FLUSH";
      break;

    default:
      return;
    }
  }

  /* otherwise, this is a read: */
  else {

    /* dispatch on the register: */
    switch (reggroup_index) {
    case TME_STP222X_IOMMU_REGGROUP_INDEX_CR:
      reg->tme_stp222x_reg_value = stp222x->tme_stp222x_iommu_cr;
      name = "CR";
      break;
    case TME_STP222X_IOMMU_REGGROUP_INDEX_TSB:
      reg->tme_stp222x_reg_value = stp222x->tme_stp222x_iommu_tsb;
      name = "TSB";
      break;
/*  case TME_STP222X_IOMMU_REGGROUP_INDEX_FLUSH: */
    default:
      return;
    }
  }

  tme_log(TME_STP222X_LOG_HANDLE(stp222x), 2000, TME_OK,
	  (TME_STP222X_LOG_HANDLE(stp222x),
	   _("IOMMU %s %s 0x%" TME_PRIx64),
	   name,
	   (reg->tme_stp222x_reg_write
	    ? "<-"
	    : "->"),
	   reg->tme_stp222x_reg_value));

  /* this register access has been completed: */
  reg->tme_stp222x_reg_completed = TRUE;
}

/* the IOMMU diagnostic register handler: */
void
tme_stp222x_iommu_regs_diag(struct tme_stp222x *stp222x,
			    struct tme_stp222x_reg *reg)
{
  tme_uint32_t reggroup_0_3;
  tme_uint32_t reggroup_index;
  const char *name;
  signed long tlb_i;
  unsigned int lru_i;
  tme_uint32_t io_address;
  tme_uint32_t io_address_tag;
  tme_uint32_t compare;
  tme_uint32_t tlb_tag;

  /* XXX FIXME - what happens if diagnostics aren't enabled? */
  if ((stp222x->tme_stp222x_iommu_cr
       & TME_STP222X_IOMMU_CR_MMU_DE) == 0) {
    abort();
  }

  /* get the register: */
  reggroup_0_3 = TME_STP222X_REGGROUP_WHICH(reg->tme_stp222x_reg_address) & 0xf;
  reggroup_index = TME_STP222X_REGGROUP_INDEX(reg->tme_stp222x_reg_address);

  /* if this is a write: */
  if (reg->tme_stp222x_reg_write) {

    switch (reggroup_0_3) {
    case 0x4: /* STP2220 0x44, STP2222 0xa4 */
      switch (reggroup_index) {
      case TME_STP222X_IOMMU_REGGROUP_INDEX_DIAG4_VA:

	/* get the address: */
	io_address = 0 - (tme_uint32_t) TME_STP222X_IOMMU_SIZE_8KB;
	io_address &= reg->tme_stp222x_reg_value;
	stp222x->tme_stp222x_iommu_va = io_address;

	/* convert the address into a VPN: */
	io_address_tag = io_address / TME_STP222X_IOMMU_SIZE_8KB;

	/* compare this address to all TLB tags: */
	compare = 0;
	tlb_i = TME_STP222X_IOMMU_TLB_SIZE - 1;
	do {
	  compare <<= 1;
	  tlb_tag = stp222x->tme_stp222x_iommu_tlb_tags[tlb_i];
	  if (((tlb_tag
		^ io_address_tag)
	       & _tme_stp222x_iommu_tlb_tag_vpn_mask(tlb_tag)) == 0) {
	    compare++;
	  }
	} while (--tlb_i >= 0);
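	/* bit N of the compare value is now set iff the tag of TLB
	   entry N matched the VA: */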
	stp222x->tme_stp222x_iommu_compare = compare;
	name = "VA";
	break;

      case TME_STP222X_IOMMU_REGGROUP_INDEX_DIAG4_COMPARE:
	reg->tme_stp222x_reg_completed = TRUE;
	return;
      default:
	return;
      }
      break;

    default: /* STP2220 0x45, STP2222 0xa5 */
      assert (reggroup_0_3 == 0x5);
      if (__tme_predict_false(reggroup_index < TME_STP222X_IOMMU_TLB_SIZE)) {
	return;
      }
      stp222x->tme_stp222x_iommu_tlb_tags[reggroup_index - TME_STP222X_IOMMU_TLB_SIZE]
	= reg->tme_stp222x_reg_value;
      name = "TLB_TAG";
      break;

    case 0x6: /* STP2220 0x46, STP2222 0xa6 */
      if (__tme_predict_false(reggroup_index >= TME_STP222X_IOMMU_TLB_SIZE)) {
	return;
      }
      stp222x->tme_stp222x_iommu_tlb_data[reggroup_index]
	= reg->tme_stp222x_reg_value;
      name = "TLB_DATA";
      break;
    }
  }

  /* otherwise, this is a read: */
  else {

    switch (reggroup_0_3) {
    case 0x4: /* STP2220 0x44, STP2222 0xa4 */
      switch (reggroup_index) {
      case TME_STP222X_IOMMU_REGGROUP_INDEX_DIAG4_VA:
	reg->tme_stp222x_reg_value = stp222x->tme_stp222x_iommu_va;
	name = "VA";
	break;
      case TME_STP222X_IOMMU_REGGROUP_INDEX_DIAG4_COMPARE:
	reg->tme_stp222x_reg_value = stp222x->tme_stp222x_iommu_compare;
	name = "COMPARE";
	break;
      default:
	return;
      }
      break;

    default: /* STP2220 0x45, STP2222 0xa5 */
      assert (reggroup_0_3 == 0x5);
      if (__tme_predict_false(reggroup_index < TME_STP222X_IOMMU_TLB_SIZE)) {
	tlb_i = stp222x->tme_stp222x_iommu_tlb_i_mru;
	for (lru_i = reggroup_index; ++lru_i != TME_STP222X_IOMMU_TLB_SIZE; ) {
	  tlb_i = stp222x->tme_stp222x_iommu_lru_next(tlb_i);
	}
	reg->tme_stp222x_reg_value = tlb_i;
	name = "LRU";
      }
      else {
	reg->tme_stp222x_reg_value
	  = stp222x->tme_stp222x_iommu_tlb_tags[reggroup_index - TME_STP222X_IOMMU_TLB_SIZE];
	name = "TLB_TAG";
      }
      break;

    case 0x6: /* STP2220 0x46, STP2222 0xa6 */
      if (__tme_predict_false(reggroup_index >= TME_STP222X_IOMMU_TLB_SIZE)) {
	return;
      }
      reg->tme_stp222x_reg_value
	= stp222x->tme_stp222x_iommu_tlb_data[reggroup_index];
      name = "TLB_DATA";
      break;
    }
  }

  if (reggroup_0_3 == 0x4) {  /* STP2220 0x44, STP2222 0xa4 */
    tme_log(TME_STP222X_LOG_HANDLE(stp222x), 2000, TME_OK,
	    (TME_STP222X_LOG_HANDLE(stp222x),
	     _("IOMMU %s %s 0x%" TME_PRIx64),
	     name,
	     (reg->tme_stp222x_reg_write
	      ? "<-"
	      : "->"),
	     reg->tme_stp222x_reg_value));
  }
  else {
    tme_log(TME_STP222X_LOG_HANDLE(stp222x), 2000, TME_OK,
	    (TME_STP222X_LOG_HANDLE(stp222x),
	     _("IOMMU %s[%u] %s 0x%" TME_PRIx64),
	     name,
	     (reggroup_index % TME_STP222X_IOMMU_TLB_SIZE),
	     (reg->tme_stp222x_reg_write
	      ? "<-"
	      : "->"),
	     reg->tme_stp222x_reg_value));
  }

  /* this register access has been completed: */
  reg->tme_stp222x_reg_completed = TRUE;
}

/* this initializes the IOMMU: */
void
tme_stp222x_iommu_init(struct tme_stp222x *stp222x)
{
  unsigned long tlb_i;

  /* initialize the TSB TLB: */
  tme_token_init(&stp222x->tme_stp222x_iommu_tsb_tlb_token);
  stp222x->tme_stp222x_iommu_tsb_tlb.tme_bus_tlb_token = &stp222x->tme_stp222x_iommu_tsb_tlb_token;

  /* initialize the LRU list: */
  tlb_i = 0;
  stp222x->tme_stp222x_iommu_tlb_i_mru = tlb_i;
  do {
    stp222x->tme_stp222x_iommu_lru_prev(tlb_i) = (tlb_i - 1) % TME_STP222X_IOMMU_TLB_SIZE;
    stp222x->tme_stp222x_iommu_lru_next(tlb_i) = (tlb_i + 1) % TME_STP222X_IOMMU_TLB_SIZE;
    tlb_i = (tlb_i + 1) % TME_STP222X_IOMMU_TLB_SIZE;
  } while (tlb_i != 0);
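  /* (the TLB entries now form a single circular LRU list, with entry
     zero initially the most recently used): */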
}