1 /*
2 * QEMU ESP/NCR53C9x emulation
3 *
4 * Copyright (c) 2005-2006 Fabrice Bellard
5 * Copyright (c) 2012 Herve Poussineau
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25
26 #include <assert.h>
27
28 #include "qemuuaeglue.h"
29 #include "queue.h"
30
31 //#include "hw/sysbus.h"
32 #include "scsi/scsi.h"
33 #include "scsi/esp.h"
34 //#include "trace.h"
35 //#include "qemu/log.h"
36
37 /*
38 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
39 * also produced as NCR89C100. See
40 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
41 * and
42 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
43 */
44
#define TYPE_ESP "esp"
//#define ESP(obj) OBJECT_CHECK(SysBusESPState, (obj), TYPE_ESP)
/* Fully parenthesized so the macro is safe in any expression context
   (the unparenthesized form mis-binds for e.g. ESP(p + 1) or casts). */
#define ESP(obj) ((ESPState*)(obj)->lsistate)
48
static void esp_raise_irq(ESPState *s)
{
    /* Latch the interrupt status bit and assert the IRQ line, but only
       on the 0 -> 1 transition so the line is not re-raised while the
       guest has not yet acknowledged the previous interrupt. */
    if (s->rregs[ESP_RSTAT] & STAT_INT)
        return;
    s->rregs[ESP_RSTAT] |= STAT_INT;
    esp_irq_raise(s->irq);
}
56
static void esp_lower_irq(ESPState *s)
{
    /* Deassert the IRQ line on the 1 -> 0 transition only. */
    if (!(s->rregs[ESP_RSTAT] & STAT_INT))
        return;
    s->rregs[ESP_RSTAT] &= ~STAT_INT;
    esp_irq_lower(s->irq);
}
64
esp_dma_enable(void * opaque,int level)65 void esp_dma_enable(void *opaque, int level)
66 {
67 ESPState *s = (ESPState*)opaque;
68 if (level) {
69 s->dma_enabled = 1;
70 if (s->dma_cb) {
71 if (s->dma_cb(s))
72 s->dma_cb = NULL;
73 }
74 } else {
75 s->dma_enabled = 0;
76 }
77 }
78
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = (ESPState*)req->hba_private;

    /* Only drop our bookkeeping if the cancelled request is the one we
       are currently executing. */
    if (req != s->current_req)
        return;
    scsiesp_req_unref(s->current_req);
    s->current_req = NULL;
    s->current_dev = NULL;
}
89
/*
 * Fetch a SCSI command block (CDB) into 'buf', either via DMA (length
 * taken from the 24-bit transfer-count registers) or from the PIO FIFO
 * buffer. Consumes the FIFO and cancels any still-active request.
 * Returns the number of command bytes fetched, or 0 when the addressed
 * target does not exist (a disconnect interrupt is raised instead).
 */
static uint32_t get_cmd(ESPState *s, uint8_t *buf)
{
    uint32_t dmalen;
    int target;

    /* Destination SCSI ID programmed by the guest driver. */
    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        /* Transfer counter: TCLO | TCMID<<8 | TCHI<<16. */
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        /* NOTE(review): dmalen is guest-controlled (up to 0xffffff) while
           some callers pass 32-byte stack buffers; an overlong count can
           overflow 'buf' (cf. QEMU CVE-2016-4441, fixed upstream by
           passing the buffer size). Verify dma_memory_read bounds the
           copy, or plumb a buflen parameter through the callers. */
        s->dma_memory_read(s->dma_opaque, buf, dmalen);
    } else {
        dmalen = s->ti_size;
        memcpy(buf, s->ti_buf, dmalen);
        //buf[0] = buf[2] >> 5; // This makes no sense!
    }

    /* The FIFO contents are consumed by the command fetch. */
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsiesp_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsiesp_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        // No such drive
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return 0;
    }
    return dmalen;
}
128
/*
 * Dispatch the fetched CDB in 'buf' to the LUN encoded in the low bits
 * of 'busid' (the IDENTIFY byte). Enqueues the request with the SCSI
 * layer, sets up the data phase (data-in/data-out) and raises the
 * selection-complete interrupt. A missing LUN reports a disconnect.
 */
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    lun = busid & 7;
    current_lun = scsiesp_device_find(&s->bus, 0, s->current_dev->id, lun);
    if (!current_lun) {
        /* No such LUN: same disconnect response as a missing target. */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }
    s->current_req = scsiesp_req_new(current_lun, 0, lun, buf, s);
    datalen = scsiesp_req_enqueue(s->current_req);
    /* Sign of ti_size encodes direction: > 0 data-in, < 0 data-out. */
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = 0;
        if (s->dma) {
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->dma_left = 0;
            s->dma_counter = 0;
        }
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsiesp_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
165
/* Strip the leading IDENTIFY byte off the buffer and hand the actual
   CDB to do_busid_cmd(). */
static void do_cmd(ESPState *s, uint8_t *buf)
{
    do_busid_cmd(s, buf + 1, buf[0]);
}
172
/* CMD_SELATN: select target with ATN, fetch the CDB and execute it. */
static int handle_satn(ESPState *s)
{
    uint8_t cdb[32];
    uint32_t n;

    /* DMA transfer requested but the DMA gate is closed: defer until
       esp_dma_enable() re-runs us. */
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return 1;
    }
    n = get_cmd(s, cdb);
    if (n != 0) {
        do_cmd(s, cdb);
    }
    return 1;
}
187
/* CMD_SEL: select target without ATN; no IDENTIFY byte, LUN forced 0. */
static int handle_s_without_atn(ESPState *s)
{
    uint8_t cdb[32];
    uint32_t n;

    /* Defer while the DMA gate is closed, as for handle_satn(). */
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return 1;
    }
    n = get_cmd(s, cdb);
    if (n != 0) {
        do_busid_cmd(s, cdb, 0);
    }
    return 1;
}
203
/* CMD_SELATNS: select with ATN but stop after the command phase; the
   fetched CDB is parked in cmdbuf until a transfer command runs it. */
static int handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return 1;
    }
    s->cmdlen = get_cmd(s, s->cmdbuf);
    if (s->cmdlen == 0)
        return 1;
    /* Signal command phase stopped; guest must issue CMD_TI next. */
    s->do_cmd = 1;
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
    return 1;
}
220
/*
 * CMD_ICCS: deliver the status byte and a trailing 0 message byte back
 * to the initiator, either via DMA or by leaving both bytes in the
 * FIFO for PIO reads.
 */
static void write_response(ESPState *s)
{
    // Multi Evolution driver reads FIFO after
    // Message Accepted command. This makes
    // sure wrong buffer is not read.
    s->pio_on = 0;
    s->async_buf = NULL;

    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;   /* message byte (0) follows the status byte */
    if (s->dma) {
        s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
    } else {
        /* PIO path: expose both bytes through the FIFO. */
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 0;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
244
/*
 * Signal DMA completion to the guest: set terminal count, clear the
 * FIFO-flags and transfer-counter registers, raise bus-service IRQ.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}
256
/*
 * Run one step of a DMA transfer between the host DMA engine and the
 * current SCSI request's buffer (or into cmdbuf while a CDB is being
 * accumulated). Returns 1 when the step completed or is deferred on
 * the SCSI layer, 0 when the host engine accepted fewer bytes than
 * offered and the caller must re-arm dma_cb to retry.
 */
static int esp_do_dma(ESPState *s)
{
    int len, len2;
    int to_device;

    /* ti_size < 0 means data-out (host memory -> device). */
    to_device = (s->ti_size < 0);
    len = s->dma_left;
    if (s->do_cmd) {
        /* DMA is delivering command bytes. Clamp the guest-programmed
           length so it cannot overflow cmdbuf (the original code copied
           dma_left bytes unchecked; cf. QEMU CVE-2016-4441). */
        if (len > (int)(sizeof(s->cmdbuf) - s->cmdlen)) {
            len = (int)(sizeof(s->cmdbuf) - s->cmdlen);
        }
        s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return 1;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return 1;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    len2 = len;
    s->dma_pending = len2;
    if (to_device) {
        len = s->dma_memory_read(s->dma_opaque, s->async_buf, len2);
    } else {
        len = s->dma_memory_write(s->dma_opaque, s->async_buf, len2);
    }
    /* A negative return from the engine means all bytes were taken. */
    if (len < 0)
        len = len2;
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device)
        s->ti_size += len;
    else
        s->ti_size -= len;
    if (s->async_len == 0) {
        scsiesp_req_continue(s->current_req);
        /* If there is still data to be read from the device then
           complete the DMA operation immediately. Otherwise defer
           until the scsi layer has completed. */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return 1;
        }
    }

    /* Host engine took fewer bytes than offered and more remain. */
    if (len2 > len && s->dma_left > 0)
        return 0;

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    return 1;
}
312
/*
 * Completion callback for host DMA engines that finish asynchronously:
 * account the bytes recorded in dma_pending exactly as esp_do_dma()
 * would have done inline, then continue the request or keep going.
 */
void esp_fake_dma_done(void *opaque)
{
    ESPState *s = (ESPState*)opaque;
    int to_device = (s->ti_size < 0);
    int len = s->dma_pending;

    s->dma_pending = 0;
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device)
        s->ti_size += len;
    else
        s->ti_size -= len;
    if (s->async_len == 0) {
        /* SCSI buffer fully consumed: hand back to the SCSI layer. */
        scsiesp_req_continue(s->current_req);
    } else {
        /* Room remains in the current buffer: keep transferring. */
        esp_do_dma(s);
    }
}
333
/*
 * SCSI layer callback: the current request finished with 'status'.
 * Latches the status for write_response(), signals DMA completion and
 * releases the request reference. 'resid' (residual byte count) is
 * currently unused here.
 */
void esp_command_complete(SCSIRequest *req, uint32_t status,
                          size_t resid)
{
    ESPState *s = (ESPState*)req->hba_private;

    s->ti_size = 0;
    s->dma_left = 0;
    s->dma_pending = 0;
    s->async_len = 0;
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsiesp_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
352
/*
 * SCSI layer callback: 'len' bytes of request data are now available
 * (or wanted) in the request buffer. Resume a pending DMA transfer,
 * or deliver the completion interrupt deferred by esp_do_dma().
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = (ESPState*)req->hba_private;

    s->async_len = len;
    s->async_buf = scsiesp_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size == 0) {
        /* If this was the last part of a DMA transfer then the
           completion interrupt is deferred to here. */
        esp_dma_done(s);
    }
}
367
esp_dreq(DeviceState * dev)368 bool esp_dreq(DeviceState *dev)
369 {
370 ESPState *s = ESP(dev);
371 return s->dma_cb != NULL;
372 }
373
/*
 * CMD_TI (Transfer Information): move data between the DMA engine /
 * FIFO and the SCSI request, or execute a CDB accumulated by
 * CMD_SELATNS. Returns 0 when a DMA step must be retried later (the
 * caller's dma_cb is re-armed), otherwise 1.
 */
static int handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        /* DMA gate closed: defer the whole command. */
        s->dma_cb = handle_ti;
        return 1;
    }

    /* 24-bit transfer counter; a programmed 0 means 0x10000 bytes. */
    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    /* Clamp to what is actually pending: at most 32 CDB bytes, or the
       remaining |ti_size| data bytes (sign encodes direction). */
    if (s->do_cmd)
        minlen = (dmalen < 32) ? dmalen : 32;
    else if (s->ti_size < 0)
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    else
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    if (s->dma) {
        /* dma == 1: fresh transfer, load the budget; dma == 2 marks a
           transfer already in progress (budget left in dma_left). */
        if (s->dma == 1)
            s->dma_left = minlen;
        s->dma = 2;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        if (!esp_do_dma(s)) {
            s->dma_cb = handle_ti;
            return 0;
        }
        return 1;
    } else if (s->do_cmd) {
        /* PIO-accumulated command is complete: execute it now. */
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return 1;
    } else {
        // no dma
        s->rregs[ESP_RINTR] = INTR_BS;
        esp_raise_irq(s);
    }
    return 1;
}
420
/* Full chip reset: clear all registers and transfer state, then
   restore the identity values the guest expects to read back. */
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->rregs[ESP_TCHI] = s->chip_id;   /* chip revision identifier */
    s->rregs[ESP_CFG1] = 7;            /* CFG1 reset value */
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;
}
435
/* Soft (CMD_RESET) reset; currently identical to a hard reset. */
static void esp_soft_reset(ESPState *s)
{
    esp_hard_reset(s);
}
440
/* GPIO reset handler: a non-zero level triggers a chip soft reset. */
static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (!level)
        return;
    esp_soft_reset(s);
}
447
/*
 * Handle a guest read of ESP register 'saddr'. FIFO reads pop one byte
 * from the active transfer buffer; RINTR reads clear interrupt state;
 * all other registers return the latched rregs value.
 */
uint64_t esp_reg_read(void *opaque, uint32_t saddr)
{
    ESPState *s = (ESPState*)opaque;
    uint32_t old_val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->ti_size > 0) {
            s->ti_size--;
            if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0 || s->pio_on) {
                /* Data out. */
                //write_log("esp: PIO data read not implemented\n");
                if (s->async_buf) {
                    /* Pop from the SCSI request's data buffer. */
                    s->rregs[ESP_FIFO] = s->async_buf[s->ti_rptr++];
                    s->pio_on = 1;
                } else {
                    s->rregs[ESP_FIFO] = 0;
                }
                /* One byte left after this read: let the SCSI layer
                   refill or complete the request. */
                if (s->ti_size == 1 && s->current_req) {
                    scsiesp_req_continue(s->current_req);
                }
            } else {
                /* Non-data phases read from the internal FIFO buffer. */
                s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
            }
            esp_raise_irq(s);
        }
        if (s->ti_size == 0) {
            /* Buffer drained: rewind the FIFO pointers. */
            s->ti_rptr = 0;
            s->ti_wptr = 0;
            s->pio_on = 0;
        }
        break;
    case ESP_RINTR:
        /* Clear sequence step, interrupt register and all status bits
           except TC */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);

        return old_val;
    case ESP_RFLAGS:
        {
            /* NOTE(review): the low 5 bits are (2^ti_size)-1 capped at
               31 rather than a plain byte count, with the sequence step
               in bits 5-7; presumably what some driver expects — verify
               against the NCR53C9x datasheet. */
            int v;
            if (s->ti_size >= 7)
                v = 31;
            else
                v = (1 << s->ti_size) - 1;
            return v | (s->rregs[ESP_RSEQ] << 5);
        }
    case ESP_RES4:
        /* NOTE(review): fixed value 0xa2; the meaning of the individual
           bits is not evident from this file — verify. */
        return 0x80 | 0x20 | 0x2;
    default:
        //write_log("read unknown 53c94 register %02x\n", saddr);
        break;
    }
    return s->rregs[saddr];
}
507
/*
 * Handle a guest write to ESP register 'saddr'. ESP_CMD writes start
 * command execution; transfer-count and config registers are latched
 * into wregs for later use. Unknown registers are logged and ignored.
 */
void esp_reg_write(void *opaque, uint32_t saddr, uint64_t val)
{
    ESPState *s = (ESPState*)opaque;

    switch (saddr) {
    case ESP_TCLO:
    case ESP_TCMID:
    case ESP_TCHI:
        /* Writing the transfer counter clears terminal-count status. */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* Accumulating a CDB. Bound-check so a misbehaving guest
               cannot overflow cmdbuf by pushing extra FIFO bytes (the
               original code had no check; cf. QEMU CVE-2016-4439). */
            if (s->cmdlen < sizeof(s->cmdbuf)) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            }
        } else if (s->ti_size == TI_BUFSZ - 1) {
            /* FIFO full: silently drop the byte. */
            ;
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            break;
        case CMD_FLUSH:
            //s->ti_size = 0;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            esp_soft_reset(s);
            // E-Matrix 530 detects existence of SCSI chip by
            // writing CMD_RESET and then immediately checking
            // if it reads back.
            s->rregs[saddr] = CMD_RESET;
            break;
        case CMD_BUSRESET:
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            // Masoboshi driver expects phase=0!
            s->rregs[ESP_RSTAT] &= ~7;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            break;
        case CMD_RSTATN:
            break;
        case CMD_SEL:
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            handle_satn(s);
            break;
        case CMD_SELATNS:
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            // Masoboshi driver expects Function Complete.
            s->rregs[ESP_RINTR] = INTR_FC;
            esp_raise_irq(s);
            break;
        default:
            break;
        }
        break;
    case ESP_WBUSID:
    case ESP_WSEL:
    case ESP_WSYNTP:
    case ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF:
    case ESP_WTEST:
        break;
    default:
        write_log("write unknown 53c94 register %02x\n", saddr);
        //activate_debugger();
        return;
    }
    /* Latch the written value for later readback / counter reload. */
    s->wregs[saddr] = val;
}
627
/* MMIO access filter: byte accesses are always valid; 32-bit accesses
   are accepted for writes only. */
static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    if (size == 1) {
        return true;
    }
    return is_write && size == 4;
}
633
634 #if 0
635 const VMStateDescription vmstate_esp = {
636 .name ="esp",
637 .version_id = 3,
638 .minimum_version_id = 3,
639 .minimum_version_id_old = 3,
640 .fields = (VMStateField []) {
641 VMSTATE_BUFFER(rregs, ESPState),
642 VMSTATE_BUFFER(wregs, ESPState),
643 VMSTATE_INT32(ti_size, ESPState),
644 VMSTATE_UINT32(ti_rptr, ESPState),
645 VMSTATE_UINT32(ti_wptr, ESPState),
646 VMSTATE_BUFFER(ti_buf, ESPState),
647 VMSTATE_UINT32(status, ESPState),
648 VMSTATE_UINT32(dma, ESPState),
649 VMSTATE_BUFFER(cmdbuf, ESPState),
650 VMSTATE_UINT32(cmdlen, ESPState),
651 VMSTATE_UINT32(do_cmd, ESPState),
652 VMSTATE_UINT32(dma_left, ESPState),
653 VMSTATE_END_OF_LIST()
654 }
655 };
656 #endif
657
/* Sysbus wrapper around ESPState; used by the preprocessor-disabled
   QEMU qdev glue kept below for reference. */
typedef struct {
    /*< private >*/
    SysBusDevice parent_obj;
    /*< public >*/

    MemoryRegion iomem;
    uint32_t it_shift;   /* shift applied to MMIO offsets to derive the register index */
    ESPState esp;        /* the actual controller state */
} SysBusESPState;
667
668 #if 0
669 static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
670 uint64_t val, unsigned int size)
671 {
672 SysBusESPState *sysbus = (SysBusESPState*)opaque;
673 uint32_t saddr;
674
675 saddr = addr >> sysbus->it_shift;
676 esp_reg_write(&sysbus->esp, saddr, val);
677 }
678
679 static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
680 unsigned int size)
681 {
682 SysBusESPState *sysbus = (SysBusESPState*)opaque;
683 uint32_t saddr;
684
685 saddr = addr >> sysbus->it_shift;
686 return esp_reg_read(&sysbus->esp, saddr);
687 }
688
689 static const MemoryRegionOps sysbus_esp_mem_ops = {
690 .read = sysbus_esp_mem_read,
691 .write = sysbus_esp_mem_write,
692 .endianness = DEVICE_NATIVE_ENDIAN,
693 .valid.accepts = esp_mem_accepts,
694 };
695
696 void esp_init(hwaddr espaddr, int it_shift,
697 ESPDMAMemoryReadWriteFunc dma_memory_read,
698 ESPDMAMemoryReadWriteFunc dma_memory_write,
699 void *dma_opaque, qemu_irq irq, qemu_irq *reset,
700 qemu_irq *dma_enable)
701 {
702 DeviceState *dev;
703 SysBusDevice *s;
704 SysBusESPState *sysbus;
705 ESPState *esp;
706
707 dev = qdev_create(NULL, TYPE_ESP);
708 sysbus = ESP(dev);
709 esp = &sysbus->esp;
710 esp->dma_memory_read = dma_memory_read;
711 esp->dma_memory_write = dma_memory_write;
712 esp->dma_opaque = dma_opaque;
713 sysbus->it_shift = it_shift;
714 /* XXX for now until rc4030 has been changed to use DMA enable signal */
715 esp->dma_enabled = 1;
716 qdev_init_nofail(dev);
717 s = SYS_BUS_DEVICE(dev);
718 sysbus_connect_irq(s, 0, irq);
719 sysbus_mmio_map(s, 0, espaddr);
720 *reset = qdev_get_gpio_in(dev, 0);
721 *dma_enable = qdev_get_gpio_in(dev, 1);
722 }
723
724 static const struct SCSIBusInfo esp_scsi_info = {
725 .tcq = false,
726 .max_target = ESP_MAX_DEVS,
727 .max_lun = 7,
728
729 .transfer_data = esp_transfer_data,
730 .complete = esp_command_complete,
731 .cancel = esp_request_cancelled
732 };
733
734 static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
735 {
736 SysBusESPState *sysbus = ESP(opaque);
737 ESPState *s = &sysbus->esp;
738
739 switch (irq) {
740 case 0:
741 parent_esp_reset(s, irq, level);
742 break;
743 case 1:
744 esp_dma_enable(opaque, irq, level);
745 break;
746 }
747 }
748
749 static void sysbus_esp_realize(DeviceState *dev, Error **errp)
750 {
751 SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
752 SysBusESPState *sysbus = ESP(dev);
753 ESPState *s = &sysbus->esp;
754 Error *err = NULL;
755
756 sysbus_init_irq(sbd, &s->irq);
757 assert(sysbus->it_shift != -1);
758
759 s->chip_id = TCHI_FAS100A;
760 memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
761 sysbus, "esp", ESP_REGS << sysbus->it_shift);
762 sysbus_init_mmio(sbd, &sysbus->iomem);
763
764 qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);
765
766 scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
767 scsi_bus_legacy_handle_cmdline(&s->bus, &err);
768 if (err != NULL) {
769 error_propagate(errp, err);
770 return;
771 }
772 }
773
774 static void sysbus_esp_hard_reset(DeviceState *dev)
775 {
776 SysBusESPState *sysbus = ESP(dev);
777 esp_hard_reset(&sysbus->esp);
778 }
779
780 static const VMStateDescription vmstate_sysbus_esp_scsi = {
781 .name = "sysbusespscsi",
782 .version_id = 0,
783 .minimum_version_id = 0,
784 .minimum_version_id_old = 0,
785 .fields = (VMStateField[]) {
786 VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
787 VMSTATE_END_OF_LIST()
788 }
789 };
790
791 static void sysbus_esp_class_init(ObjectClass *klass, void *data)
792 {
793 DeviceClass *dc = DEVICE_CLASS(klass);
794
795 dc->realize = sysbus_esp_realize;
796 dc->reset = sysbus_esp_hard_reset;
797 dc->vmsd = &vmstate_sysbus_esp_scsi;
798 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
799 }
800
801 static const TypeInfo sysbus_esp_info = {
802 .name = TYPE_ESP,
803 .parent = TYPE_SYS_BUS_DEVICE,
804 .instance_size = sizeof(SysBusESPState),
805 .class_init = sysbus_esp_class_init,
806 };
807
808 static void esp_register_types(void)
809 {
810 type_register_static(&sysbus_esp_info);
811 }
812
813 type_init(esp_register_types)
814 #endif
815
/*
 * Allocate and minimally initialize the ESP state attached to 'dev'.
 * Only the host DMA accessors are set here; register state is set up
 * later by esp_scsi_reset(). The allocation is owned by 'dev'.
 */
void esp_scsi_init(DeviceState *dev, ESPDMAMemoryReadWriteFunc read, ESPDMAMemoryReadWriteFunc write)
{
    ESPState *s;

    /* Zero-initialized state; idiomatic calloc(count, size) order.
       The original dereferenced the result without checking it. */
    dev->lsistate = calloc(1, sizeof(ESPState));
    if (!dev->lsistate) {
        write_log("esp: out of memory allocating ESPState\n");
        return;
    }
    s = ESP(dev);
    s->dma_memory_read = read;
    s->dma_memory_write = write;
}
823
/* Reset the emulated chip and (re)bind the host-side callback context. */
void esp_scsi_reset(DeviceState *dev, void *privdata)
{
    ESPState *s = ESP(dev);

    esp_soft_reset(s);
    /* NOTE(review): the same opaque host context is stored as the bus
       private data, the IRQ handle and the DMA opaque pointer; this
       relies on the host-side glue using one object for all three —
       verify against the callers. */
    s->bus.privdata = privdata;
    s->irq = privdata;
    s->dma_opaque = privdata;
}
833