/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
{
    if (fifo8_num_used(fifo) == fifo->capacity) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(fifo, val);
}

static uint8_t esp_fifo_pop(Fifo8 *fifo)
{
    if (fifo8_is_empty(fifo)) {
        return 0;
    }

    return fifo8_pop(fifo);
}

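/*
 * Helper around fifo8_pop_buf(): pop up to maxlen bytes into dest (a
 * NULL dest simply discards the bytes). fifo8_pop_buf() returns a
 * pointer into the underlying ring buffer, so a single call can return
 * fewer bytes than requested when the data wraps past the end of the
 * buffer; the second pop below collects the remainder.
 */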
static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n, n2;
    int len;

    if (maxlen == 0) {
        return 0;
    }

    len = maxlen;
    buf = fifo8_pop_buf(fifo, len, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    /* Add FIFO wraparound if needed */
    len -= n;
    len = MIN(len, fifo8_num_used(fifo));
    if (len) {
        buf = fifo8_pop_buf(fifo, len, &n2);
        if (dest) {
            memcpy(&dest[n], buf, n2);
        }
        n += n2;
    }

    return n;
}

static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

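/*
 * The transfer counter (TC) is a 24-bit value spread across the
 * TCLO/TCMID/TCHI registers: the rregs copy holds the live count while
 * the wregs copy (the STC, see esp_get_stc() below) holds the reload
 * value latched by any command issued with CMD_DMA set. A zero STC
 * reloads as 0x10000 in esp_reg_write(), presumably mirroring the
 * NCR53C9x convention that a counter of zero selects the maximum
 * transfer length.
 */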
static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    uint32_t old_tc = esp_get_tc(s);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (old_tc && dmalen == 0) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

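/*
 * PDMA (pseudo-DMA) accessors: when no dma_memory_read/write helpers
 * are wired up the guest CPU moves each byte itself via the esp-pdma
 * MMIO region. Each byte written lands in cmdfifo or fifo depending on
 * the current phase and decrements TC; reads pop from the same FIFOs.
 */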
static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    if (s->do_cmd) {
        val = esp_fifo_pop(&s->cmdfifo);
    } else {
        val = esp_fifo_pop(&s->fifo);
    }

    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    if (s->do_cmd) {
        esp_fifo_push(&s->cmdfifo, val);
    } else {
        esp_fifo_push(&s->fifo, val);
    }

    dmalen--;
    esp_set_tc(s, dmalen);
}

static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
{
    s->pdma_cb = cb;
}

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}

static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    return dmalen;
}

static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

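/*
 * Layout of cmdfifo during selection: any MESSAGE OUT bytes (e.g. the
 * IDENTIFY byte that selects the LUN) come first, immediately followed
 * by the CDB itself. cmdfifo_cdb_offset counts the message bytes still
 * to be consumed before the CDB starts.
 */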
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}

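/*
 * Selection with ATN: the initiator is expected to send one IDENTIFY
 * message byte ahead of the CDB, hence cmdfifo_cdb_offset is set to 1
 * below. The *_pdma_cb variants re-run the same completion check after
 * each guest PDMA access until TC has drained and a command is present.
 */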
static void satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    esp_set_pdma_cb(s, SATN_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
    if (esp_select(s) < 0) {
        return;
    }
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        if (s->dma) {
            esp_raise_drq(s);
        }
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}

static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

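/*
 * ICCS (initiator command complete sequence): return the two byte
 * status phase consisting of the SCSI status byte followed by a
 * message byte of 0, i.e. COMMAND COMPLETE.
 */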
static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}

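/*
 * Callback run after each guest access to the PDMA region while a
 * transfer is in flight: either keep accumulating command bytes into
 * cmdfifo until the complete command has arrived, or shuttle data
 * between the FIFO and the current SCSI request buffer, finishing the
 * DMA once TC runs out or the scsi layer buffer is exhausted.
 */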
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}

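/*
 * Main DMA engine: depending on the current phase this either fills
 * cmdfifo with command bytes or copies between guest memory and the
 * scsi layer's async buffer. Where no dma_memory_read/write helpers
 * exist the transfer falls back to PDMA: arm DO_DMA_PDMA_CB, raise DRQ
 * and let the guest move the data itself.
 */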
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * Command phase: this path is only reached from handle_ti() with
         * s->do_cmd set (esp_transfer_data() asserts !s->do_cmd), i.e.
         * while command bytes are still being accumulated in cmdfifo
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}

static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    if (to_device) {
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

static void esp_pdma_cb(ESPState *s)
{
    switch (s->pdma_cb) {
    case SATN_PDMA_CB:
        satn_pdma_cb(s);
        break;
    case S_WITHOUT_SATN_PDMA_CB:
        s_without_satn_pdma_cb(s);
        break;
    case SATN_STOP_PDMA_CB:
        satn_stop_pdma_cb(s);
        break;
    case WRITE_RESPONSE_PDMA_CB:
        write_response_pdma_cb(s);
        break;
    case DO_DMA_PDMA_CB:
        do_dma_pdma_cb(s);
        break;
    default:
        g_assert_not_reached();
    }
}

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
            (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

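/*
 * Note on the CMD register below: issuing any command with CMD_DMA set
 * reloads the live TC from the STC (the wregs copy), with a zero STC
 * reloading as 0x10000 since a counter of zero conventionally selects
 * the maximum transfer length on this chip family.
 */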
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            if (esp_get_stc(s) == 0) {
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            esp_bus_reset(s);
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

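/*
 * ESPState is migrated as a child struct of the sysbus/PCI device, so
 * the version_id passed to these .needed callbacks is the parent
 * section's. esp_pre_save() stashes vmstate_esp.version_id in
 * mig_version_id, and taking the MIN of the two recovers the effective
 * ESP section version.
 */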
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                          OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

/*
 * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
 * guest CPU to perform the transfers between the SCSI bus and memory
 * itself. This is indicated by the dma_memory_read and dma_memory_write
 * functions being NULL (in contrast to the ESP PCI device) whilst
 * dma_enabled is still set.
 */

static bool esp_pdma_needed(void *opaque)
{
    ESPState *s = ESP(opaque);

    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
           s->dma_enabled;
}

static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = esp_pdma_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(pdma_cb, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_esp_pdma,
        NULL
    }
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

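/*
 * The PDMA region implementation accepts 1 and 2 byte accesses (see
 * sysbus_esp_pdma_ops below): a 16-bit access is split into two byte
 * transfers, most significant byte first, presumably matching the byte
 * order the big-endian m68k Macintosh guests expect. On reads the
 * callback is only invoked once fewer than two bytes remain in the
 * FIFO so that a 16-bit read can drain both bytes first.
 */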
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_pdma_cb(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_num_used(&s->fifo) < 2) {
        esp_pdma_cb(s);
    }
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)
173}
174
175static uint32_t esp_get_stc(ESPState *s)
176{
177 uint32_t dmalen;
178
179 dmalen = s->wregs[ESP_TCLO];
180 dmalen |= s->wregs[ESP_TCMID] << 8;
181 dmalen |= s->wregs[ESP_TCHI] << 16;
182
183 return dmalen;
184}
185
186static uint8_t esp_pdma_read(ESPState *s)
187{
188 uint8_t val;
189
190 if (s->do_cmd) {
191 val = esp_fifo_pop(&s->cmdfifo);
192 } else {
193 val = esp_fifo_pop(&s->fifo);
194 }
195
196 return val;
197}
198
199static void esp_pdma_write(ESPState *s, uint8_t val)
200{
201 uint32_t dmalen = esp_get_tc(s);
202
203 if (dmalen == 0) {
204 return;
205 }
206
207 if (s->do_cmd) {
208 esp_fifo_push(&s->cmdfifo, val);
209 } else {
210 esp_fifo_push(&s->fifo, val);
211 }
212
213 dmalen--;
214 esp_set_tc(s, dmalen);
215}
216
217static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
218{
219 s->pdma_cb = cb;
220}
221
222static int esp_select(ESPState *s)
223{
224 int target;
225
226 target = s->wregs[ESP_WBUSID] & BUSID_DID;
227
228 s->ti_size = 0;
229
230 if (s->current_req) {
231 /* Started a new command before the old one finished. Cancel it. */
232 scsi_req_cancel(s->current_req);
233 }
234
235 s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
236 if (!s->current_dev) {
237 /* No such drive */
238 s->rregs[ESP_RSTAT] = 0;
239 s->rregs[ESP_RINTR] = INTR_DC;
240 s->rregs[ESP_RSEQ] = SEQ_0;
241 esp_raise_irq(s);
242 return -1;
243 }
244
245 /*
246 * Note that we deliberately don't raise the IRQ here: this will be done
247 * either in do_command_phase() for DATA OUT transfers or by the deferred
248 * IRQ mechanism in esp_transfer_data() for DATA IN transfers
249 */
250 s->rregs[ESP_RINTR] |= INTR_FC;
251 s->rregs[ESP_RSEQ] = SEQ_CD;
252 return 0;
253}
254
255static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
256{
257 uint8_t buf[ESP_CMDFIFO_SZ];
258 uint32_t dmalen, n;
259 int target;
260
261 target = s->wregs[ESP_WBUSID] & BUSID_DID;
262 if (s->dma) {
263 dmalen = MIN(esp_get_tc(s), maxlen);
264 if (dmalen == 0) {
265 return 0;
266 }
267 if (s->dma_memory_read) {
268 s->dma_memory_read(s->dma_opaque, buf, dmalen);
269 dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
270 fifo8_push_all(&s->cmdfifo, buf, dmalen);
271 } else {
272 return 0;
273 }
274 } else {
275 dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
276 if (dmalen == 0) {
277 return 0;
278 }
279 n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
280 n = MIN(fifo8_num_free(&s->cmdfifo), n);
281 fifo8_push_all(&s->cmdfifo, buf, n);
282 }
283 trace_esp_get_cmd(dmalen, target);
284
285 return dmalen;
286}
287
288static void do_command_phase(ESPState *s)
289{
290 uint32_t cmdlen;
291 int32_t datalen;
292 SCSIDevice *current_lun;
293 uint8_t buf[ESP_CMDFIFO_SZ];
294
295 trace_esp_do_command_phase(s->lun);
296 cmdlen = fifo8_num_used(&s->cmdfifo);
297 if (!cmdlen || !s->current_dev) {
298 return;
299 }
300 esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);
301
302 current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
303 if (!current_lun) {
304 /* No such drive */
305 s->rregs[ESP_RSTAT] = 0;
306 s->rregs[ESP_RINTR] = INTR_DC;
307 s->rregs[ESP_RSEQ] = SEQ_0;
308 esp_raise_irq(s);
309 return;
310 }
311
312 s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
313 datalen = scsi_req_enqueue(s->current_req);
314 s->ti_size = datalen;
315 fifo8_reset(&s->cmdfifo);
316 if (datalen != 0) {
317 s->rregs[ESP_RSTAT] = STAT_TC;
318 s->rregs[ESP_RSEQ] = SEQ_CD;
319 s->ti_cmd = 0;
320 esp_set_tc(s, 0);
321 if (datalen > 0) {
322 /*
323 * Switch to DATA IN phase but wait until initial data xfer is
324 * complete before raising the command completion interrupt
325 */
326 s->data_in_ready = false;
327 s->rregs[ESP_RSTAT] |= STAT_DI;
328 } else {
329 s->rregs[ESP_RSTAT] |= STAT_DO;
330 s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
331 esp_raise_irq(s);
332 esp_lower_drq(s);
333 }
334 scsi_req_continue(s->current_req);
335 return;
336 }
337}
338
339static void do_message_phase(ESPState *s)
340{
341 if (s->cmdfifo_cdb_offset) {
342 uint8_t message = esp_fifo_pop(&s->cmdfifo);
343
344 trace_esp_do_identify(message);
345 s->lun = message & 7;
346 s->cmdfifo_cdb_offset--;
347 }
348
349 /* Ignore extended messages for now */
350 if (s->cmdfifo_cdb_offset) {
351 int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
352 esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
353 s->cmdfifo_cdb_offset = 0;
354 }
355}
356
357static void do_cmd(ESPState *s)
358{
359 do_message_phase(s);
360 assert(s->cmdfifo_cdb_offset == 0);
361 do_command_phase(s);
362}
363
364static void satn_pdma_cb(ESPState *s)
365{
366 if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
367 s->cmdfifo_cdb_offset = 1;
368 s->do_cmd = 0;
369 do_cmd(s);
370 }
371}
372
373static void handle_satn(ESPState *s)
374{
375 int32_t cmdlen;
376
377 if (s->dma && !s->dma_enabled) {
378 s->dma_cb = handle_satn;
379 return;
380 }
381 esp_set_pdma_cb(s, SATN_PDMA_CB);
382 if (esp_select(s) < 0) {
383 return;
384 }
385 cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
386 if (cmdlen > 0) {
387 s->cmdfifo_cdb_offset = 1;
388 s->do_cmd = 0;
389 do_cmd(s);
390 } else if (cmdlen == 0) {
391 if (s->dma) {
392 esp_raise_drq(s);
393 }
394 s->do_cmd = 1;
395 /* Target present, but no cmd yet - switch to command phase */
396 s->rregs[ESP_RSEQ] = SEQ_CD;
397 s->rregs[ESP_RSTAT] = STAT_CD;
398 }
399}
400
401static void s_without_satn_pdma_cb(ESPState *s)
402{
403 if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
404 s->cmdfifo_cdb_offset = 0;
405 s->do_cmd = 0;
406 do_cmd(s);
407 }
408}
409
410static void handle_s_without_atn(ESPState *s)
411{
412 int32_t cmdlen;
413
414 if (s->dma && !s->dma_enabled) {
415 s->dma_cb = handle_s_without_atn;
416 return;
417 }
418 esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
419 if (esp_select(s) < 0) {
420 return;
421 }
422 cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
423 if (cmdlen > 0) {
424 s->cmdfifo_cdb_offset = 0;
425 s->do_cmd = 0;
426 do_cmd(s);
427 } else if (cmdlen == 0) {
428 if (s->dma) {
429 esp_raise_drq(s);
430 }
431 s->do_cmd = 1;
432 /* Target present, but no cmd yet - switch to command phase */
433 s->rregs[ESP_RSEQ] = SEQ_CD;
434 s->rregs[ESP_RSTAT] = STAT_CD;
435 }
436}
437
438static void satn_stop_pdma_cb(ESPState *s)
439{
440 if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
441 trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
442 s->do_cmd = 1;
443 s->cmdfifo_cdb_offset = 1;
444 s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
445 s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
446 s->rregs[ESP_RSEQ] = SEQ_CD;
447 esp_raise_irq(s);
448 }
449}
450
451static void handle_satn_stop(ESPState *s)
452{
453 int32_t cmdlen;
454
455 if (s->dma && !s->dma_enabled) {
456 s->dma_cb = handle_satn_stop;
457 return;
458 }
459 esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
460 if (esp_select(s) < 0) {
461 return;
462 }
463 cmdlen = get_cmd(s, 1);
464 if (cmdlen > 0) {
465 trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
466 s->do_cmd = 1;
467 s->cmdfifo_cdb_offset = 1;
468 s->rregs[ESP_RSTAT] = STAT_MO;
469 s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
470 s->rregs[ESP_RSEQ] = SEQ_MO;
471 esp_raise_irq(s);
472 } else if (cmdlen == 0) {
473 if (s->dma) {
474 esp_raise_drq(s);
475 }
476 s->do_cmd = 1;
477 /* Target present, switch to message out phase */
478 s->rregs[ESP_RSEQ] = SEQ_MO;
479 s->rregs[ESP_RSTAT] = STAT_MO;
480 }
481}
482
483static void write_response_pdma_cb(ESPState *s)
484{
485 s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
486 s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
487 s->rregs[ESP_RSEQ] = SEQ_CD;
488 esp_raise_irq(s);
489}
490
491static void write_response(ESPState *s)
492{
493 uint8_t buf[2];
494
495 trace_esp_write_response(s->status);
496
497 buf[0] = s->status;
498 buf[1] = 0;
499
500 if (s->dma) {
501 if (s->dma_memory_write) {
502 s->dma_memory_write(s->dma_opaque, buf, 2);
503 s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
504 s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
505 s->rregs[ESP_RSEQ] = SEQ_CD;
506 } else {
507 esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
508 esp_raise_drq(s);
509 return;
510 }
511 } else {
512 fifo8_reset(&s->fifo);
513 fifo8_push_all(&s->fifo, buf, 2);
514 s->rregs[ESP_RFLAGS] = 2;
515 }
516 esp_raise_irq(s);
517}
518
519static void esp_dma_done(ESPState *s)
520{
521 s->rregs[ESP_RSTAT] |= STAT_TC;
522 s->rregs[ESP_RINTR] |= INTR_BS;
523 s->rregs[ESP_RFLAGS] = 0;
524 esp_set_tc(s, 0);
525 esp_raise_irq(s);
526}
527
528static void do_dma_pdma_cb(ESPState *s)
529{
530 int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
531 int len;
532 uint32_t n;
533
534 if (s->do_cmd) {
535 /* Ensure we have received complete command after SATN and stop */
536 if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
537 return;
538 }
539
540 s->ti_size = 0;
541 if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
542 /* No command received */
543 if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
544 return;
545 }
546
547 /* Command has been received */
548 s->do_cmd = 0;
549 do_cmd(s);
550 } else {
551 /*
552 * Extra message out bytes received: update cmdfifo_cdb_offset
553 * and then switch to command phase
554 */
555 s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
556 s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
557 s->rregs[ESP_RSEQ] = SEQ_CD;
558 s->rregs[ESP_RINTR] |= INTR_BS;
559 esp_raise_irq(s);
560 }
561 return;
562 }
563
564 if (!s->current_req) {
565 return;
566 }
567
568 if (to_device) {
569 /* Copy FIFO data to device */
570 len = MIN(s->async_len, ESP_FIFO_SZ);
571 len = MIN(len, fifo8_num_used(&s->fifo));
572 n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
573 s->async_buf += n;
574 s->async_len -= n;
575 s->ti_size += n;
576
577 if (n < len) {
578 /* Unaligned accesses can cause FIFO wraparound */
579 len = len - n;
580 n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
581 s->async_buf += n;
582 s->async_len -= n;
583 s->ti_size += n;
584 }
585
586 if (s->async_len == 0) {
587 scsi_req_continue(s->current_req);
588 return;
589 }
590
591 if (esp_get_tc(s) == 0) {
592 esp_lower_drq(s);
593 esp_dma_done(s);
594 }
595
596 return;
597 } else {
598 if (s->async_len == 0) {
599 /* Defer until the scsi layer has completed */
600 scsi_req_continue(s->current_req);
601 s->data_in_ready = false;
602 return;
603 }
604
605 if (esp_get_tc(s) != 0) {
606 /* Copy device data to FIFO */
607 len = MIN(s->async_len, esp_get_tc(s));
608 len = MIN(len, fifo8_num_free(&s->fifo));
609 fifo8_push_all(&s->fifo, s->async_buf, len);
610 s->async_buf += len;
611 s->async_len -= len;
612 s->ti_size -= len;
613 esp_set_tc(s, esp_get_tc(s) - len);
614
615 if (esp_get_tc(s) == 0) {
616 /* Indicate transfer to FIFO is complete */
617 s->rregs[ESP_RSTAT] |= STAT_TC;
618 }
619 return;
620 }
621
622 /* Partially filled a scsi buffer. Complete immediately. */
623 esp_lower_drq(s);
624 esp_dma_done(s);
625 }
626}
627
628static void esp_do_dma(ESPState *s)
629{
630 uint32_t len, cmdlen;
631 int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
632 uint8_t buf[ESP_CMDFIFO_SZ];
633
634 len = esp_get_tc(s);
635 if (s->do_cmd) {
636 /*
637 * handle_ti_cmd() case: esp_do_dma() is called only from
638 * handle_ti_cmd() with do_cmd != NULL (see the assert())
639 */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}

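/*
 * Non-DMA (PIO) transfer path: data moves through the main FIFO under
 * guest CPU control, so at most one FIFO's worth of data is transferred
 * per Transfer Information command.
 */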
static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    if (to_device) {
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

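/*
 * The pending PDMA continuation is tracked as an enum value (pdma_cb)
 * rather than as a function pointer so that it can be migrated as a
 * plain integer (see vmstate_esp_pdma below).
 */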
static void esp_pdma_cb(ESPState *s)
{
    switch (s->pdma_cb) {
    case SATN_PDMA_CB:
        satn_pdma_cb(s);
        break;
    case S_WITHOUT_SATN_PDMA_CB:
        s_without_satn_pdma_cb(s);
        break;
    case SATN_STOP_PDMA_CB:
        satn_stop_pdma_cb(s);
        break;
    case WRITE_RESPONSE_PDMA_CB:
        write_response_pdma_cb(s);
        break;
    case DO_DMA_PDMA_CB:
        do_dma_pdma_cb(s);
        break;
    default:
        g_assert_not_reached();
    }
}

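/*
 * SCSIBusInfo .complete callback, invoked by the SCSI layer when the
 * current request finishes; resid is the number of bytes that were not
 * transferred.
 */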
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer, so if the
         * async data transfer were delayed, s->dma would no longer reflect
         * the original transfer.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}

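/*
 * Chip-level reset: clears all registers and transfer state. CFG1 is
 * initialised to 7, the conventional default for the chip's SCSI bus ID.
 */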
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

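/*
 * Note that some register reads have side effects: reading RINTR clears
 * the interrupt state and reading the FIFO pops a byte.
 */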
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
            (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
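            /*
             * As on real hardware, a start transfer count of zero selects
             * the maximum transfer length of 0x10000 bytes.
             */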
            if (esp_get_stc(s) == 0) {
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            esp_bus_reset(s);
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

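/* Byte accesses are always valid; 32-bit accesses are accepted for writes only */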
static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

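/*
 * The ESP vmstate version saved by the migration source travels in
 * mig_version_id (see esp_pre_save() below), so the field tests clamp
 * the inbound version_id against it.
 */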
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

/*
 * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
 * guest CPU to perform the transfers between the SCSI bus and memory
 * itself. This is indicated by the dma_memory_read and dma_memory_write
 * functions being NULL (in contrast to the ESP PCI device) whilst
 * dma_enabled is still set.
 */

static bool esp_pdma_needed(void *opaque)
{
    ESPState *s = ESP(opaque);

    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
           s->dma_enabled;
}

static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = esp_pdma_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(pdma_cb, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

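/*
 * Version 5 replaced the fixed ti_buf/cmdbuf arrays with Fifo8s; the
 * pre-5 fields are retained as mig_* members so that streams from
 * older versions can still be loaded (see esp_post_load() above).
 */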
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_esp_pdma,
        NULL
    }
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_pdma_cb(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
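    /*
     * Run the pending PDMA continuation once the FIFO is nearly drained
     * so that it can be refilled before the next access.
     */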
    if (fifo8_num_used(&s->fifo) < 2) {
        esp_pdma_cb(s);
    }
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

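/*
 * A machine model would typically wire this device up along the following
 * lines (a sketch only: the MMIO base address and IRQ are hypothetical and
 * board-specific, and it_shift must be set before realize, see the assert
 * below):
 *
 *     DeviceState *dev = qdev_new(TYPE_SYSBUS_ESP);
 *     SysBusESPState *sysbus = SYSBUS_ESP(dev);
 *
 *     sysbus->it_shift = 2;
 *     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
 *     sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0x78800000);
 *     sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, board_irq);
 */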
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)