/* $Id: stp22xx.c,v 1.4 2009/09/07 15:03:19 fredette Exp $ */

/* ic/stp22xx.c - common STP2200, STP2202, STP2220, and STP2222
   emulation: */

/*
 * Copyright (c) 2009 Matt Fredette
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Matt Fredette.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <tme/common.h>
_TME_RCSID("$Id: stp22xx.c,v 1.4 2009/09/07 15:03:19 fredette Exp $");

/* includes: */
#include "stp22xx-impl.h"

/* this busies a generic bus connection: */
struct tme_bus_connection *
tme_stp22xx_busy_bus(struct tme_stp22xx *stp22xx,
                     tme_uint32_t conn_index)
{
  struct tme_bus_connection *conn_bus;

  /* if the connection index is valid: */
  conn_bus = NULL;
  if (__tme_predict_true(conn_index != stp22xx->tme_stp22xx_conn_index_null)) {

    /* if there is a connection at this index: */
    conn_bus = stp22xx->tme_stp22xx_conns[conn_index].tme_stp22xx_conn_bus;
    if (__tme_predict_true(conn_bus != NULL)) {

    }
  }

  return (conn_bus);
}

/* this unbusies a generic bus connection: */
void
tme_stp22xx_unbusy_bus(struct tme_stp22xx *stp22xx,
                       struct tme_bus_connection *conn_bus)
{

  /* the connection must be valid: */
  assert (conn_bus != NULL);
}

/* this busies a slave generic bus connection: */
struct tme_bus_connection *
tme_stp22xx_slave_busy_bus(struct tme_stp22xx *stp22xx,
                           tme_uint32_t slave_conn_index)
{
  struct tme_bus_connection *slave_conn_bus;

  /* if the connection index is valid: */
  slave_conn_bus = tme_stp22xx_busy_bus(stp22xx, slave_conn_index);
  if (__tme_predict_true(slave_conn_bus != NULL)) {

    /* this is now the busy slave connection: */
    assert (stp22xx->tme_stp22xx_slave_conn_bus == NULL);
    stp22xx->tme_stp22xx_slave_conn_bus = slave_conn_bus;
  }
  return (slave_conn_bus);
}

/* this unbusies a slave generic bus connection: */
void
tme_stp22xx_slave_unbusy(struct tme_stp22xx *stp22xx)
{

  /* unbusy the slave connection: */
  tme_stp22xx_unbusy_bus(stp22xx, stp22xx->tme_stp22xx_slave_conn_bus);

  /* there is now no busy slave connection: */
  stp22xx->tme_stp22xx_slave_conn_bus = NULL;
}
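
/* An illustrative sketch of the busy/unbusy pattern above, as a
   chip-specific caller might use it while holding the stp22xx mutex;
   the _tme_stpfoo_foo name, the conn_index value, and the work done
   with the connection are hypothetical: */
#if 0
static void
_tme_stpfoo_foo(struct tme_stp22xx *stp22xx,
                tme_uint32_t conn_index)
{
  struct tme_bus_connection *conn_bus;

  /* busy the generic bus connection at this index, if any: */
  conn_bus = tme_stp22xx_busy_bus(stp22xx, conn_index);
  if (conn_bus != NULL) {

    /* ... use the connection, for example to make a callout ... */

    /* unbusy the connection: */
    tme_stp22xx_unbusy_bus(stp22xx, conn_bus);
  }
}
#endif /* 0 */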

/* this enters without locking the mutex: */
static struct tme_stp22xx *
_tme_stp22xx_enter_locked(struct tme_stp22xx *stp22xx)
{
  signed long completion_i;
  struct tme_completion *completion;
  _tme_stp22xx_completion_handler_t handler;

  /* loop over the completions: */
  completion_i = TME_STP22XX_COMPLETIONS_MAX - 1;
  completion = &stp22xx->tme_stp22xx_completions[completion_i];
  do {

    /* if this completion is valid: */
    if (tme_completion_is_valid(completion)) {

      /* make a read-before-read barrier, so that we read the
         completion's data only after seeing that it is valid: */
      tme_memory_barrier(stp22xx, stp22xx->tme_stp22xx_sizeof, TME_MEMORY_BARRIER_READ_BEFORE_READ);

      /* invalidate the completion: */
      tme_completion_invalidate(completion);

      /* call the completion handler: */
      handler = stp22xx->tme_stp22xx_completion_handlers[completion_i];
      assert (handler != NULL);
      stp22xx->tme_stp22xx_completion_handlers[completion_i] = NULL;
      (*handler)(stp22xx,
                 completion,
                 stp22xx->tme_stp22xx_completion_args[completion_i]);
    }

    completion--;
  } while (--completion_i >= 0);

  return (stp22xx);
}

/* this enters: */
struct tme_stp22xx *
tme_stp22xx_enter(struct tme_stp22xx *stp22xx)
{

  /* lock the mutex: */
  tme_mutex_lock(&stp22xx->tme_stp22xx_mutex);

  /* finish the enter: */
  return (_tme_stp22xx_enter_locked(stp22xx));
}

/* this enters as the bus master: */
struct tme_stp22xx *
tme_stp22xx_enter_master(struct tme_bus_connection *master_conn_bus)
{
  struct tme_stp22xx *stp22xx;

#if TME_STP22XX_BUS_TRANSITION
  signed long completion_i;
  struct tme_completion *completion;

  /* if the bus master was making a callout through the bus: */
#if !TME_THREADS_COOPERATIVE
#error "preemptive threads not supported yet"
#endif /* !TME_THREADS_COOPERATIVE */
  stp22xx = master_conn_bus->tme_bus_connection.tme_connection_element->tme_element_private;
  if (stp22xx->tme_stp22xx_master_completion != NULL) {

    /* find the completion for that callout and force it to be valid.
       this is necessary since we may be reentered by a new master
       before the original callout returns: */
    completion_i = TME_STP22XX_COMPLETIONS_MAX - 1;
    for (;;) {
      if (stp22xx->tme_stp22xx_completion_handlers[completion_i]
          == tme_stp22xx_complete_master) {
        break;
      }
      --completion_i;
      assert (completion_i >= 0);
    }
    assert (stp22xx->tme_stp22xx_completion_args[completion_i]
            == stp22xx->tme_stp22xx_master_completion);
    completion = &stp22xx->tme_stp22xx_completions[completion_i];
    if (!tme_completion_is_valid(completion)) {
      tme_completion_validate(completion);
    }
  }

#endif /* TME_STP22XX_BUS_TRANSITION */

  /* enter: */
  stp22xx = tme_stp22xx_enter(master_conn_bus->tme_bus_connection.tme_connection_element->tme_element_private);

  /* only the bus master can cause a callout through the bus, and the
     bus master can only cause one such callout at a time: */
  assert (stp22xx->tme_stp22xx_master_completion == NULL);

  return (stp22xx);
}

/* this runs: */
static void
_tme_stp22xx_run(struct tme_stp22xx *stp22xx)
{

  /* if the run function is not running: */
  if (!stp22xx->tme_stp22xx_running) {

    /* the run function is now running: */
    stp22xx->tme_stp22xx_running = TRUE;

    /* call the run function: */
    (*stp22xx->tme_stp22xx_run)(stp22xx);

    /* the run function is not running: */
    stp22xx->tme_stp22xx_running = FALSE;
  }
}

/* this leaves: */
void
tme_stp22xx_leave(struct tme_stp22xx *stp22xx)
{
  signed long completion_i;
  struct tme_completion *completion;
  struct tme_completion *completions_delayed[TME_STP22XX_COMPLETIONS_DELAYED_MAX];

  /* run: */
  _tme_stp22xx_run(stp22xx);

  /* get any completions whose validations were delayed: */
  for (completion_i = 0;; completion_i++) {
    completion = stp22xx->tme_stp22xx_completions_delayed[completion_i];
    if (completion == NULL) {
      break;
    }
    stp22xx->tme_stp22xx_completions_delayed[completion_i] = NULL;
    completions_delayed[completion_i] = completion;
  }

  /* unlock the mutex: */
  tme_mutex_unlock(&stp22xx->tme_stp22xx_mutex);

  /* validate any completions: */
  for (; --completion_i >= 0; ) {
    tme_completion_validate(completions_delayed[completion_i]);
  }
}
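
/* An illustrative sketch of the enter/leave pattern: a chip-specific
   connection entry point (the _tme_stpfoo_connection_foo name and the
   work it does are hypothetical) recovers the common structure from the
   connection's element, enters, updates state, and leaves: */
#if 0
static int
_tme_stpfoo_connection_foo(struct tme_bus_connection *conn_bus)
{
  struct tme_stp22xx *stp22xx;

  /* recover our data structure and enter: */
  stp22xx = tme_stp22xx_enter((struct tme_stp22xx *) conn_bus->tme_bus_connection.tme_connection_element->tme_element_private);

  /* ... update chip state; completions passed to
     tme_stp22xx_completion_validate() are only validated after we
     leave ... */

  /* leave; this runs the run function, unlocks the mutex, and then
     validates any delayed completions: */
  tme_stp22xx_leave(stp22xx);

  return (TME_OK);
}
#endif /* 0 */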

/* this waits on a condition, with an optional sleep time: */
void
tme_stp22xx_cond_sleep_yield(struct tme_stp22xx *stp22xx,
                             struct tme_stp22xx_cond *cond,
                             const struct timeval *sleep)
{
  signed long completion_i;
  struct tme_completion *completion;

  /* this condition must be idle: */
  assert (cond->tme_stp22xx_cond_state == TME_STP22XX_COND_STATE_IDLE);

  /* we are now running, before waiting on this condition: */
  cond->tme_stp22xx_cond_state = TME_STP22XX_COND_STATE_RUNNING;

  /* run: */
  _tme_stp22xx_run(stp22xx);

  /* if this condition was notified while we were running: */
  if (cond->tme_stp22xx_cond_state == TME_STP22XX_COND_STATE_NOTIFIED) {

    /* this condition is idle again: */
    cond->tme_stp22xx_cond_state = TME_STP22XX_COND_STATE_IDLE;

    return;
  }

  /* the condition must still be running: */
  assert (cond->tme_stp22xx_cond_state == TME_STP22XX_COND_STATE_RUNNING);

  /* make a total write-before-write barrier, to force writes to the
     completion states to happen before validation: */
  tme_memory_barrier(0, 0, TME_MEMORY_BARRIER_WRITE_BEFORE_WRITE);

  /* validate any completions whose validations were delayed: */
  for (completion_i = 0;; completion_i++) {
    completion = stp22xx->tme_stp22xx_completions_delayed[completion_i];
    if (completion == NULL) {
      break;
    }
    stp22xx->tme_stp22xx_completions_delayed[completion_i] = NULL;
    tme_completion_validate(completion);
  }

  /* we are now waiting on this condition, unless threading is
     cooperative, in which case the condition will be idle again the
     next time this thread runs: */
  cond->tme_stp22xx_cond_state
    = (TME_THREADS_COOPERATIVE
       ? TME_STP22XX_COND_STATE_IDLE
       : TME_STP22XX_COND_STATE_WAITING);

  /* sleep or wait on the condition variable: */
  if (sleep != NULL) {
    tme_cond_sleep_yield(&cond->tme_stp22xx_cond_cond,
                         &stp22xx->tme_stp22xx_mutex,
                         sleep);
  }
  else {
    tme_cond_wait_yield(&cond->tme_stp22xx_cond_cond,
                        &stp22xx->tme_stp22xx_mutex);
  }

  /* this condition is idle again: */
  cond->tme_stp22xx_cond_state = TME_STP22XX_COND_STATE_IDLE;

  /* reenter: */
  _tme_stp22xx_enter_locked(stp22xx);
}

/* this validates a completion: */
/* NB: completion may be NULL: */
void
tme_stp22xx_completion_validate(struct tme_stp22xx *stp22xx,
                                struct tme_completion *completion)
{
  unsigned long completion_i;

  /* we delay validating completions: */
  for (completion_i = 0;; completion_i++) {
    assert (completion_i < TME_STP22XX_COMPLETIONS_DELAYED_MAX);
    if (stp22xx->tme_stp22xx_completions_delayed[completion_i] == NULL) {
      break;
    }
  }
  stp22xx->tme_stp22xx_completions_delayed[completion_i] = completion;
}

/* this allocates a completion: */
struct tme_completion *
tme_stp22xx_completion_alloc(struct tme_stp22xx *stp22xx,
                             _tme_stp22xx_completion_handler_t handler,
                             void *arg)
{
  signed long completion_i;
  struct tme_completion *completion;

  /* find a free completion: */
  completion_i = TME_STP22XX_COMPLETIONS_MAX - 1;
  for (; stp22xx->tme_stp22xx_completion_handlers[completion_i] != NULL; ) {
    completion_i--;
    assert (completion_i >= 0);
  }

  /* allocate this completion: */
  stp22xx->tme_stp22xx_completion_handlers[completion_i] = handler;
  stp22xx->tme_stp22xx_completion_args[completion_i] = arg;

  /* the completion can't still (or already) be valid: */
  completion = &stp22xx->tme_stp22xx_completions[completion_i];
  assert (!tme_completion_is_valid(completion));

  /* return the completion: */
  return (completion);
}
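
/* An illustrative sketch of the completion protocol: a caller allocates a
   completion naming a handler, leaves, and makes a callout that will
   eventually validate the completion; _tme_stp22xx_enter_locked() then
   invokes the handler the next time this element is entered.  The
   _tme_stpfoo_complete_foo and _tme_stpfoo_callout_foo names, and the
   callout itself, are hypothetical: */
#if 0
static void
_tme_stpfoo_complete_foo(struct tme_stp22xx *stp22xx,
                         struct tme_completion *completion,
                         void *arg)
{
  /* ... examine completion->tme_completion_error and update chip state ... */
}

static void
_tme_stpfoo_callout_foo(struct tme_stp22xx *stp22xx)
{
  struct tme_completion *completion;

  /* with the mutex held, allocate a completion naming the handler: */
  completion = tme_stp22xx_completion_alloc(stp22xx, _tme_stpfoo_complete_foo, (void *) NULL);

  /* leave, make the callout that will eventually validate *completion,
     and reenter: */
  tme_stp22xx_leave(stp22xx);
  /* ... the callout ... */
  tme_stp22xx_enter(stp22xx);
}
#endif /* 0 */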

/* this calls out a bus signal to a connection: */
void
tme_stp22xx_callout_signal(struct tme_stp22xx *stp22xx,
                           tme_uint32_t conn_index,
                           unsigned int signal,
                           _tme_stp22xx_completion_handler_t handler)
{
  struct tme_bus_connection *conn_bus;
  struct tme_completion completion_buffer;
  struct tme_completion *completion;
  struct tme_bus_connection *conn_bus_other;

  /* if there is a connection at this index: */
  conn_bus = tme_stp22xx_busy_bus(stp22xx, conn_index);
  if (conn_bus != NULL) {

    /* if the connection at this index doesn't care about bus
       signals: */
    conn_bus_other = (struct tme_bus_connection *) conn_bus->tme_bus_connection.tme_connection_other;
    if (conn_bus_other->tme_bus_signal == NULL) {

      /* unbusy the bus connection: */
      tme_stp22xx_unbusy_bus(stp22xx, conn_bus);

      /* behave as if there is no connection at this index: */
      conn_bus = NULL;
    }
  }

  /* if there is no connection at this index: */
  if (conn_bus == NULL) {

    /* call the completion handler directly: */
    completion_buffer.tme_completion_error = TME_OK;
    (*handler)(stp22xx, &completion_buffer, (void *) NULL);
    return;
  }

  /* allocate a completion: */
  completion = tme_stp22xx_completion_alloc(stp22xx, handler, (void *) NULL);

  /* leave: */
  tme_stp22xx_leave(stp22xx);

  /* call out the bus signal: */
  conn_bus_other = (struct tme_bus_connection *) conn_bus->tme_bus_connection.tme_connection_other;
#if TME_STP22XX_BUS_TRANSITION
  completion->tme_completion_error =
    (*conn_bus_other->tme_bus_signal)
    (conn_bus_other,
     signal);
  tme_completion_validate(completion);
#else /* !TME_STP22XX_BUS_TRANSITION */
#error WRITEME
#endif /* !TME_STP22XX_BUS_TRANSITION */

  /* reenter: */
  tme_stp22xx_enter(stp22xx);

  /* unbusy the bus connection: */
  tme_stp22xx_unbusy_bus(stp22xx, conn_bus);
}
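
/* An illustrative sketch of a caller of tme_stp22xx_callout_signal(): a
   hypothetical chip-specific helper that, with the mutex held, calls out
   a bus signal (already composed by its caller from the TME bus signal
   macros) and uses the no-op completion handler because it does not need
   to know when the callout finishes: */
#if 0
static void
_tme_stpfoo_signal(struct tme_stp22xx *stp22xx,
                   tme_uint32_t conn_index,
                   unsigned int signal)
{
  /* call out the bus signal to the connection at this index: */
  tme_stp22xx_callout_signal(stp22xx,
                             conn_index,
                             signal,
                             tme_stp22xx_complete_nop);
}
#endif /* 0 */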

/* this completes a bus operation between master and slave: */
void
tme_stp22xx_complete_master(struct tme_stp22xx *stp22xx,
                            struct tme_completion *completion,
                            void *__master_completion)
{
  struct tme_completion **_master_completion;
  struct tme_completion *master_completion;

  /* unbusy the slave connection: */
  tme_stp22xx_slave_unbusy(stp22xx);

  /* if this bus operation was not aborted: */
  _master_completion = (struct tme_completion **) __master_completion;
  if (stp22xx->tme_stp22xx_master_completion == _master_completion) {

    /* pass any slave error and scalar return value back to the master: */
    master_completion = *_master_completion;
    master_completion->tme_completion_error = completion->tme_completion_error;
    master_completion->tme_completion_scalar = completion->tme_completion_scalar;

    /* this bus operation is completed: */
    stp22xx->tme_stp22xx_master_completion = NULL;

    /* validate the completion: */
    tme_stp22xx_completion_validate(stp22xx, master_completion);
  }
}

/* this completes a bus grant: */
void
tme_stp22xx_complete_bg(struct tme_stp22xx *stp22xx,
                        struct tme_completion *completion,
                        void *arg)
{
  stp22xx->tme_stp22xx_master_conn_index
    = stp22xx->tme_stp22xx_master_conn_index_pending;
  stp22xx->tme_stp22xx_master_conn_index_pending = stp22xx->tme_stp22xx_conn_index_null;

  /* unused: */
  completion = 0;
  arg = 0;
}

/* this is a no-op completion: */
void
tme_stp22xx_complete_nop(struct tme_stp22xx *stp22xx,
                         struct tme_completion *completion,
                         void *arg)
{
  /* unused: */
  stp22xx = 0;
  completion = 0;
  arg = 0;
}

/* this runs a slave bus cycle: */
void
tme_stp22xx_slave_cycle(struct tme_bus_connection *master_conn_bus,
                        tme_uint32_t slave_conn_index,
                        struct tme_bus_cycle *master_cycle,
                        tme_uint32_t *_master_fast_cycle_types,
                        struct tme_completion **_master_completion)
{
  struct tme_stp22xx *stp22xx;
  struct tme_bus_connection *slave_conn_bus;
  int completion_error;
#if TME_STP22XX_BUS_TRANSITION
  struct tme_bus_tlb tlb_local;
  int rc;
  tme_uint8_t *memory;
  tme_bus_addr_t tlb_cycle_address;
  int shift;
#endif /* TME_STP22XX_BUS_TRANSITION */
  struct tme_completion *completion;
  struct tme_bus_connection *slave_conn_bus_other;
  struct tme_completion *master_completion;

  /* recover our data structure: */
  stp22xx = master_conn_bus->tme_bus_connection.tme_connection_element->tme_element_private;

  /* this cycle must not have been aborted: */
  assert (stp22xx->tme_stp22xx_master_completion == _master_completion);

  /* busy the connection to any slave: */
  slave_conn_bus = tme_stp22xx_slave_busy_bus(stp22xx, slave_conn_index);

  /* if the connection index is invalid, or if there is no slave at
     this connection index: */
  if (__tme_predict_false(slave_conn_bus == NULL)) {

    /* complete with an error: */
    completion_error = ENOENT;
  }

  /* otherwise, if the master is trying a cycle to itself: */
  else if (__tme_predict_false(slave_conn_bus == master_conn_bus)) {

    /* unbusy the connection to the slave: */
    tme_stp22xx_slave_unbusy(stp22xx);

    /* complete with an error: */
    completion_error = EIO;
  }

  /* otherwise, we can run this cycle: */
  else {

#if TME_STP22XX_BUS_TRANSITION

    /* fill a TLB entry for this address: */
    tlb_local.tme_bus_tlb_token = &stp22xx->tme_stp22xx_slave_cycle_tlb_token;
    slave_conn_bus_other = (struct tme_bus_connection *) slave_conn_bus->tme_bus_connection.tme_connection_other;
    rc = ((*slave_conn_bus_other->tme_bus_tlb_fill)
          (slave_conn_bus_other,
           &tlb_local,
           master_cycle->tme_bus_cycle_address,
           master_cycle->tme_bus_cycle_type));
    assert (rc == TME_OK);

    /* the master can't do any fast transfers that this TLB entry
       doesn't allow: */
    if (tlb_local.tme_bus_tlb_emulator_off_read == TME_EMULATOR_OFF_UNDEF) {
      *_master_fast_cycle_types &= ~TME_BUS_CYCLE_READ;
    }
    if (tlb_local.tme_bus_tlb_emulator_off_write == TME_EMULATOR_OFF_UNDEF) {
      *_master_fast_cycle_types &= ~TME_BUS_CYCLE_WRITE;
    }

    /* if this cycle can be done fast: */
    /* NB: this breaks tme_shared and const: */
    memory
      = (master_cycle->tme_bus_cycle_type == TME_BUS_CYCLE_READ
         ? (tme_uint8_t *) tlb_local.tme_bus_tlb_emulator_off_read
         : (tme_uint8_t *) tlb_local.tme_bus_tlb_emulator_off_write);
    if (memory != TME_EMULATOR_OFF_UNDEF) {

      /* do the fast transfer: */
      tme_bus_cycle_xfer_memory(master_cycle, memory, tlb_local.tme_bus_tlb_addr_last);

      /* unbusy the connection to the slave: */
      tme_stp22xx_slave_unbusy(stp22xx);

      /* complete with success: */
      master_completion = *_master_completion;
      master_completion->tme_completion_error = TME_OK;
      tme_stp22xx_completion_validate(stp22xx, master_completion);
      stp22xx->tme_stp22xx_master_completion = NULL;
      return;
    }

#endif /* TME_STP22XX_BUS_TRANSITION */

    /* when we complete, we will complete for the master: */
    completion
      = tme_stp22xx_completion_alloc(stp22xx,
                                     tme_stp22xx_complete_master,
                                     _master_completion);

    /* leave: */
    tme_stp22xx_leave(stp22xx);

    /* run this cycle: */
#if TME_STP22XX_BUS_TRANSITION
    tlb_cycle_address = tlb_local.tme_bus_tlb_addr_offset + master_cycle->tme_bus_cycle_address;
    shift = tlb_local.tme_bus_tlb_addr_shift;
    if (shift < 0) {
      tlb_cycle_address <<= (0 - shift);
    }
    else if (shift > 0) {
      tlb_cycle_address >>= shift;
    }
    master_cycle->tme_bus_cycle_address = tlb_cycle_address;
    /* NB: we may be reentered before this cycle callout returns,
       usually by this slave device immediately turning around to
       become a master.  unfortunately, this means that
       tme_stp22xx_enter_master() will have to validate the first
       master's completion itself, before we would normally do it
       here.  this also means that we have to predict how the cycle
       will complete - and we assume that if the slave does turn
       around to become a master, the cycle completed
       successfully: */
    completion->tme_completion_error = TME_OK;
    rc
      = ((*tlb_local.tme_bus_tlb_cycle)
         (tlb_local.tme_bus_tlb_cycle_private,
          master_cycle));
    if (stp22xx->tme_stp22xx_master_completion != _master_completion) {
      assert (rc == TME_OK);
    }
    else {
      completion->tme_completion_error = rc;
      tme_completion_validate(completion);
    }
#else /* !TME_STP22XX_BUS_TRANSITION */
#error WRITEME
#endif /* !TME_STP22XX_BUS_TRANSITION */

    /* reenter: */
    tme_stp22xx_enter(stp22xx);

    return;
  }

  /* complete with the error: */
  master_completion = *_master_completion;
  master_completion->tme_completion_error = completion_error;
  tme_stp22xx_completion_validate(stp22xx, master_completion);
  stp22xx->tme_stp22xx_master_completion = NULL;
  *_master_fast_cycle_types = 0;
}

/* this fills a TLB entry: */
void
tme_stp22xx_tlb_fill(struct tme_bus_connection *agent_conn_bus,
                     struct tme_bus_tlb *tlb,
                     tme_uint32_t slave_conn_index,
                     tme_bus_addr64_t slave_address,
                     unsigned int cycle_type)
{
  struct tme_stp22xx *stp22xx;
  struct tme_bus_connection *slave_conn_bus;
  struct tme_bus_connection *slave_conn_bus_other;
#if TME_STP22XX_BUS_TRANSITION
  int rc;
#endif /* TME_STP22XX_BUS_TRANSITION */

  /* recover our data structure: */
  stp22xx = agent_conn_bus->tme_bus_connection.tme_connection_element->tme_element_private;

  /* busy the connection to any slave: */
  slave_conn_bus = tme_stp22xx_busy_bus(stp22xx, slave_conn_index);

  /* if the connection index is invalid, or if there is no slave at
     this connection index, or if the agent is filling a TLB for an
     address in itself: */
  if (__tme_predict_false(slave_conn_bus == NULL
                          || slave_conn_bus == agent_conn_bus)) {

    /* unbusy any connection to the slave: */
    if (slave_conn_bus != NULL) {
      tme_stp22xx_unbusy_bus(stp22xx, slave_conn_bus);
    }

    /* initialize the TLB entry: */
    tme_bus_tlb_initialize(tlb);

    /* our caller will map this TLB entry from covering all addresses
       to only covering a region's addresses: */
    tlb->tme_bus_tlb_addr_first = 0;
    tlb->tme_bus_tlb_addr_last = 0 - (tme_bus_addr_t) 1;
  }

  /* otherwise, the slave can fill this TLB entry: */
  else {

    /* leave: */
    tme_stp22xx_leave(stp22xx);

    /* fill this TLB entry: */
    slave_conn_bus_other = (struct tme_bus_connection *) slave_conn_bus->tme_bus_connection.tme_connection_other;
#if TME_STP22XX_BUS_TRANSITION
    rc =
#endif /* TME_STP22XX_BUS_TRANSITION */
    (*slave_conn_bus_other->tme_bus_tlb_fill)
      (slave_conn_bus_other,
       tlb,
       slave_address,
       cycle_type);
#if TME_STP22XX_BUS_TRANSITION
    assert (rc == TME_OK);
#endif /* TME_STP22XX_BUS_TRANSITION */

    /* reenter: */
    tme_stp22xx_enter(stp22xx);

    /* unbusy the connection to the slave: */
    tme_stp22xx_unbusy_bus(stp22xx, slave_conn_bus);
  }
}

#if TME_STP22XX_BUS_TRANSITION

/* this is the bus TLB set add transition glue: */
#undef tme_stp22xx_tlb_set_add
int
tme_stp22xx_tlb_set_add_transition(struct tme_bus_connection *agent_conn_bus,
                                   struct tme_bus_tlb_set_info *tlb_set_info)
{
  struct tme_completion completion_buffer;
  tme_completion_init(&completion_buffer);
  tme_stp22xx_tlb_set_add(agent_conn_bus,
                          tlb_set_info,
                          &completion_buffer);
  return (completion_buffer.tme_completion_error);
}

#endif /* TME_STP22XX_BUS_TRANSITION */

/* this adds a TLB set: */
void
tme_stp22xx_tlb_set_add(struct tme_bus_connection *agent_conn_bus,
                        struct tme_bus_tlb_set_info *tlb_set_info,
                        struct tme_completion *agent_completion)
{
  struct tme_stp22xx *stp22xx;

  /* enter: */
  stp22xx = tme_stp22xx_enter((struct tme_stp22xx *) agent_conn_bus->tme_bus_connection.tme_connection_element->tme_element_private);

  /* if this TLB set provides a bus context register: */
  if (tlb_set_info->tme_bus_tlb_set_info_bus_context != NULL) {

    /* this bus only has one context: */
    *tlb_set_info->tme_bus_tlb_set_info_bus_context = 0;
    tlb_set_info->tme_bus_tlb_set_info_bus_context_max = 0;
  }

  /* leave: */
  agent_completion->tme_completion_error = TME_OK;
  tme_stp22xx_completion_validate(stp22xx, agent_completion);
  tme_stp22xx_leave(stp22xx);
}

/* this notifies a condition: */
void
tme_stp22xx_cond_notify(struct tme_stp22xx_cond *cond)
{

  /* if threading is cooperative, and this condition is idle: */
  if (TME_THREADS_COOPERATIVE
      && cond->tme_stp22xx_cond_state == TME_STP22XX_COND_STATE_IDLE) {

    /* we have to assume that a thread has yielded waiting on this
       condition, but because threading is cooperative, has already
       set the condition back to idle.  we can't mark the condition as
       notified, since it would never get set back to idle: */
  }

  /* otherwise, threading is not cooperative, or the condition is not
     idle: */
  else {

    /* this condition must be either running before waiting, or
       waiting: */
    assert (cond->tme_stp22xx_cond_state == TME_STP22XX_COND_STATE_RUNNING
            || cond->tme_stp22xx_cond_state == TME_STP22XX_COND_STATE_WAITING);

    /* this condition has been notified: */
    cond->tme_stp22xx_cond_state = TME_STP22XX_COND_STATE_NOTIFIED;
  }

  /* notify the real condition: */
  tme_cond_notify(&cond->tme_stp22xx_cond_cond, FALSE);
}
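
/* An illustrative sketch of the condition protocol: a chip-specific worker
   (the _tme_stpfoo_worker name and its work are hypothetical) enters once
   and then alternates between doing work and sleeping on its condition,
   while other code, with the mutex held, wakes it with
   tme_stp22xx_cond_notify(): */
#if 0
static void
_tme_stpfoo_worker(struct tme_stp22xx *stp22xx,
                   struct tme_stp22xx_cond *cond)
{
  /* enter: */
  stp22xx = tme_stp22xx_enter(stp22xx);
  for (;;) {

    /* ... do any pending work ... */

    /* sleep until this condition is notified; passing a sleep time
       instead of NULL would bound the wait: */
    tme_stp22xx_cond_sleep_yield(stp22xx, cond, (const struct timeval *) NULL);
  }
}
#endif /* 0 */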

/* this initializes a condition: */
void
tme_stp22xx_cond_init(struct tme_stp22xx_cond *cond)
{
  /* this condition is idle: */
  cond->tme_stp22xx_cond_state = TME_STP22XX_COND_STATE_IDLE;
  tme_cond_init(&cond->tme_stp22xx_cond_cond);
}

/* this initializes an stp22xx: */
void
tme_stp22xx_init(struct tme_stp22xx *stp22xx,
                 unsigned long _sizeof,
                 tme_uint32_t conn_index_null)
{
  unsigned long completion_i;

  /* initialize the mutex: */
  tme_mutex_init(&stp22xx->tme_stp22xx_mutex);

  /* set the size of the structure: */
  stp22xx->tme_stp22xx_sizeof = _sizeof;

  /* set the undefined connection index: */
  stp22xx->tme_stp22xx_conn_index_null = conn_index_null;

  /* initialize the completions: */
  for (completion_i = 0; completion_i < TME_STP22XX_COMPLETIONS_MAX; completion_i++) {
    tme_completion_init(&stp22xx->tme_stp22xx_completions[completion_i]);
    stp22xx->tme_stp22xx_completion_handlers[completion_i] = NULL;
  }

  /* initialize the delayed completions: */
  for (completion_i = 0; completion_i < TME_STP22XX_COMPLETIONS_DELAYED_MAX; completion_i++) {
    stp22xx->tme_stp22xx_completions_delayed[completion_i] = NULL;
  }

  /* there is no current bus master: */
  stp22xx->tme_stp22xx_master_conn_index = conn_index_null;

#if TME_STP22XX_BUS_TRANSITION
  /* initialize the token for filling TLB entries for slave cycles: */
  tme_token_init(&stp22xx->tme_stp22xx_slave_cycle_tlb_token);
#endif /* TME_STP22XX_BUS_TRANSITION */
}
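
/* An illustrative sketch of how a chip-specific element might wrap and
   initialize this common state.  The struct tme_stpfoo type, its members,
   TME_STPFOO_CONN_NULL, and _tme_stpfoo_run are hypothetical; the casts
   of the element private state to struct tme_stp22xx * above assume that
   the common state is the first member of the chip-specific structure: */
#if 0
struct tme_stpfoo {

  /* the common stp22xx state, which must come first: */
  struct tme_stp22xx tme_stpfoo_stp22xx;

  /* ... chip-specific state ... */
};

static void
_tme_stpfoo_init(struct tme_stpfoo *stpfoo)
{

  /* at element creation time, initialize the common state and set the
     chip-specific run function: */
  tme_stp22xx_init(&stpfoo->tme_stpfoo_stp22xx,
                   sizeof(struct tme_stpfoo),
                   TME_STPFOO_CONN_NULL);
  stpfoo->tme_stpfoo_stp22xx.tme_stp22xx_run = _tme_stpfoo_run;
}
#endif /* 0 */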