xref: /qemu/hw/ide/core.c (revision 3d100d0f)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/i386/pc.h"
28 #include "hw/pci/pci.h"
29 #include "hw/isa/isa.h"
30 #include "qemu/error-report.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
37 
38 #include "hw/ide/internal.h"
39 
40 /* These values were based on a Seagate ST3500418AS but have been modified
41    to make more sense in QEMU */
42 static const int smart_attributes[][12] = {
43     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
44     /* raw read error rate*/
45     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
46     /* spin up */
47     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
48     /* start stop count */
49     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
50     /* remapped sectors */
51     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
52     /* power on hours */
53     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
54     /* power cycle count */
55     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
56     /* airflow-temperature-celsius */
57     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
58 };
59 
60 static int ide_handle_rw_error(IDEState *s, int error, int op);
61 static void ide_dummy_transfer_stop(IDEState *s);
62 
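/*
 * ATA IDENTIFY string fields store the two characters of each 16-bit
 * word swapped, which is why padstr() writes to str[i ^ 1]; unused
 * positions are padded with spaces.
 */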
63 static void padstr(char *str, const char *src, int len)
64 {
65     int i, v;
66     for(i = 0; i < len; i++) {
67         if (*src)
68             v = *src++;
69         else
70             v = ' ';
71         str[i^1] = v;
72     }
73 }
74 
75 static void put_le16(uint16_t *p, unsigned int v)
76 {
77     *p = cpu_to_le16(v);
78 }
79 
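/*
 * Update only the capacity words of the IDENTIFY data: words 60-61 hold
 * the 28-bit LBA sector count, words 100-103 the 48-bit LBA sector count.
 */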
80 static void ide_identify_size(IDEState *s)
81 {
82     uint16_t *p = (uint16_t *)s->identify_data;
83     put_le16(p + 60, s->nb_sectors);
84     put_le16(p + 61, s->nb_sectors >> 16);
85     put_le16(p + 100, s->nb_sectors);
86     put_le16(p + 101, s->nb_sectors >> 16);
87     put_le16(p + 102, s->nb_sectors >> 32);
88     put_le16(p + 103, s->nb_sectors >> 48);
89 }
90 
91 static void ide_identify(IDEState *s)
92 {
93     uint16_t *p;
94     unsigned int oldsize;
95     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
96 
97     p = (uint16_t *)s->identify_data;
98     if (s->identify_set) {
99         goto fill_buffer;
100     }
101     memset(p, 0, sizeof(s->identify_data));
102 
103     put_le16(p + 0, 0x0040);
104     put_le16(p + 1, s->cylinders);
105     put_le16(p + 3, s->heads);
106     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
107     put_le16(p + 5, 512); /* XXX: retired, remove ? */
108     put_le16(p + 6, s->sectors);
109     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
110     put_le16(p + 20, 3); /* XXX: retired, remove ? */
111     put_le16(p + 21, 512); /* cache size in sectors */
112     put_le16(p + 22, 4); /* ecc bytes */
113     padstr((char *)(p + 23), s->version, 8); /* firmware version */
114     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
115 #if MAX_MULT_SECTORS > 1
116     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
117 #endif
118     put_le16(p + 48, 1); /* dword I/O */
119     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* IORDY, LBA and DMA supported */
120     put_le16(p + 51, 0x200); /* PIO transfer cycle */
121     put_le16(p + 52, 0x200); /* DMA transfer cycle */
122     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
123     put_le16(p + 54, s->cylinders);
124     put_le16(p + 55, s->heads);
125     put_le16(p + 56, s->sectors);
126     oldsize = s->cylinders * s->heads * s->sectors;
127     put_le16(p + 57, oldsize);
128     put_le16(p + 58, oldsize >> 16);
129     if (s->mult_sectors)
130         put_le16(p + 59, 0x100 | s->mult_sectors);
131     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
132     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
133     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
134     put_le16(p + 63, 0x07); /* mdma0-2 supported */
135     put_le16(p + 64, 0x03); /* pio3-4 supported */
136     put_le16(p + 65, 120);
137     put_le16(p + 66, 120);
138     put_le16(p + 67, 120);
139     put_le16(p + 68, 120);
140     if (dev && dev->conf.discard_granularity) {
141         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
142     }
143 
144     if (s->ncq_queues) {
145         put_le16(p + 75, s->ncq_queues - 1);
146         /* NCQ supported */
147         put_le16(p + 76, (1 << 8));
148     }
149 
150     put_le16(p + 80, 0xf0); /* ata4 -> ata7 supported */
151     put_le16(p + 81, 0x16); /* conforms to ata5 */
152     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
153     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
154     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
155     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
156     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
157     if (s->wwn) {
158         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
159     } else {
160         put_le16(p + 84, (1 << 14) | 0);
161     }
162     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
163     if (blk_enable_write_cache(s->blk)) {
164         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
165     } else {
166         put_le16(p + 85, (1 << 14) | 1);
167     }
168     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
169     put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
170     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
171     if (s->wwn) {
172         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
173     } else {
174         put_le16(p + 87, (1 << 14) | 0);
175     }
176     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
177     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
178     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
179     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
180     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
181     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
182 
183     if (dev && dev->conf.physical_block_size)
184         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
185     if (s->wwn) {
186         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
187         put_le16(p + 108, s->wwn >> 48);
188         put_le16(p + 109, s->wwn >> 32);
189         put_le16(p + 110, s->wwn >> 16);
190         put_le16(p + 111, s->wwn);
191     }
192     if (dev && dev->conf.discard_granularity) {
193         put_le16(p + 169, 1); /* TRIM support */
194     }
195 
196     ide_identify_size(s);
197     s->identify_set = 1;
198 
199 fill_buffer:
200     memcpy(s->io_buffer, p, sizeof(s->identify_data));
201 }
202 
203 static void ide_atapi_identify(IDEState *s)
204 {
205     uint16_t *p;
206 
207     p = (uint16_t *)s->identify_data;
208     if (s->identify_set) {
209         goto fill_buffer;
210     }
211     memset(p, 0, sizeof(s->identify_data));
212 
213     /* Removable CDROM, 50us response, 12 byte packets */
214     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
215     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
216     put_le16(p + 20, 3); /* buffer type */
217     put_le16(p + 21, 512); /* cache size in sectors */
218     put_le16(p + 22, 4); /* ecc bytes */
219     padstr((char *)(p + 23), s->version, 8); /* firmware version */
220     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
221     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
222 #ifdef USE_DMA_CDROM
223     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
224     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
225     put_le16(p + 62, 7);  /* single word dma0-2 supported */
226     put_le16(p + 63, 7);  /* mdma0-2 supported */
227 #else
228     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
229     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
230     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
231 #endif
232     put_le16(p + 64, 3); /* pio3-4 supported */
233     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
234     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
235     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
236     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
237 
238     put_le16(p + 71, 30); /* in ns */
239     put_le16(p + 72, 30); /* in ns */
240 
241     if (s->ncq_queues) {
242         put_le16(p + 75, s->ncq_queues - 1);
243         /* NCQ supported */
244         put_le16(p + 76, (1 << 8));
245     }
246 
247     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
248     if (s->wwn) {
249         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
250         put_le16(p + 87, (1 << 8)); /* WWN enabled */
251     }
252 
253 #ifdef USE_DMA_CDROM
254     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
255 #endif
256 
257     if (s->wwn) {
258         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
259         put_le16(p + 108, s->wwn >> 48);
260         put_le16(p + 109, s->wwn >> 32);
261         put_le16(p + 110, s->wwn >> 16);
262         put_le16(p + 111, s->wwn);
263     }
264 
265     s->identify_set = 1;
266 
267 fill_buffer:
268     memcpy(s->io_buffer, p, sizeof(s->identify_data));
269 }
270 
271 static void ide_cfata_identify_size(IDEState *s)
272 {
273     uint16_t *p = (uint16_t *)s->identify_data;
274     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
275     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
276     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
277     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
278 }
279 
280 static void ide_cfata_identify(IDEState *s)
281 {
282     uint16_t *p;
283     uint32_t cur_sec;
284 
285     p = (uint16_t *)s->identify_data;
286     if (s->identify_set) {
287         goto fill_buffer;
288     }
289     memset(p, 0, sizeof(s->identify_data));
290 
291     cur_sec = s->cylinders * s->heads * s->sectors;
292 
293     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
294     put_le16(p + 1, s->cylinders);		/* Default cylinders */
295     put_le16(p + 3, s->heads);			/* Default heads */
296     put_le16(p + 6, s->sectors);		/* Default sectors per track */
297     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
298     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
299     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
300     put_le16(p + 22, 0x0004);			/* ECC bytes */
301     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
302     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
303 #if MAX_MULT_SECTORS > 1
304     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
305 #else
306     put_le16(p + 47, 0x0000);
307 #endif
308     put_le16(p + 49, 0x0f00);			/* Capabilities */
309     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
310     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
311     put_le16(p + 53, 0x0003);			/* Translation params valid */
312     put_le16(p + 54, s->cylinders);		/* Current cylinders */
313     put_le16(p + 55, s->heads);			/* Current heads */
314     put_le16(p + 56, s->sectors);		/* Current sectors */
315     put_le16(p + 57, cur_sec);			/* Current capacity */
316     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
317     if (s->mult_sectors)			/* Multiple sector setting */
318         put_le16(p + 59, 0x100 | s->mult_sectors);
319     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
320     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
321     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
322     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
323     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
324     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
325     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
326     put_le16(p + 82, 0x400c);			/* Command Set supported */
327     put_le16(p + 83, 0x7068);			/* Command Set supported */
328     put_le16(p + 84, 0x4000);			/* Features supported */
329     put_le16(p + 85, 0x000c);			/* Command Set enabled */
330     put_le16(p + 86, 0x7044);			/* Command Set enabled */
331     put_le16(p + 87, 0x4000);			/* Features enabled */
332     put_le16(p + 91, 0x4060);			/* Current APM level */
333     put_le16(p + 129, 0x0002);			/* Current features option */
334     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
335     put_le16(p + 131, 0x0001);			/* Initial power mode */
336     put_le16(p + 132, 0x0000);			/* User signature */
337     put_le16(p + 160, 0x8100);			/* Power requirement */
338     put_le16(p + 161, 0x8001);			/* CF command set */
339 
340     ide_cfata_identify_size(s);
341     s->identify_set = 1;
342 
343 fill_buffer:
344     memcpy(s->io_buffer, p, sizeof(s->identify_data));
345 }
346 
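/*
 * After reset the sector count/number and cylinder registers carry the
 * device signature: 0xeb14 in the cylinder registers identifies an ATAPI
 * (PACKET) device, 0x0000 an ATA device, and 0xffff is used when there is
 * no backing device.
 */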
347 static void ide_set_signature(IDEState *s)
348 {
349     s->select &= 0xf0; /* clear head */
350     /* put signature */
351     s->nsector = 1;
352     s->sector = 1;
353     if (s->drive_kind == IDE_CD) {
354         s->lcyl = 0x14;
355         s->hcyl = 0xeb;
356     } else if (s->blk) {
357         s->lcyl = 0;
358         s->hcyl = 0;
359     } else {
360         s->lcyl = 0xff;
361         s->hcyl = 0xff;
362     }
363 }
364 
365 typedef struct TrimAIOCB {
366     BlockAIOCB common;
367     BlockBackend *blk;
368     QEMUBH *bh;
369     int ret;
370     QEMUIOVector *qiov;
371     BlockAIOCB *aiocb;
372     int i, j;
373 } TrimAIOCB;
374 
375 static void trim_aio_cancel(BlockAIOCB *acb)
376 {
377     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
378 
379     /* Exit the loop so ide_issue_trim_cb will not continue  */
380     iocb->j = iocb->qiov->niov - 1;
381     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
382 
383     iocb->ret = -ECANCELED;
384 
385     if (iocb->aiocb) {
386         blk_aio_cancel_async(iocb->aiocb);
387         iocb->aiocb = NULL;
388     }
389 }
390 
391 static const AIOCBInfo trim_aiocb_info = {
392     .aiocb_size         = sizeof(TrimAIOCB),
393     .cancel_async       = trim_aio_cancel,
394 };
395 
396 static void ide_trim_bh_cb(void *opaque)
397 {
398     TrimAIOCB *iocb = opaque;
399 
400     iocb->common.cb(iocb->common.opaque, iocb->ret);
401 
402     qemu_bh_delete(iocb->bh);
403     iocb->bh = NULL;
404     qemu_aio_unref(iocb);
405 }
406 
407 static void ide_issue_trim_cb(void *opaque, int ret)
408 {
409     TrimAIOCB *iocb = opaque;
410     if (ret >= 0) {
411         while (iocb->j < iocb->qiov->niov) {
412             int j = iocb->j;
413             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
414                 int i = iocb->i;
415                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
416 
417                 /* 6-byte LBA + 2-byte range per entry */
418                 uint64_t entry = le64_to_cpu(buffer[i]);
419                 uint64_t sector = entry & 0x0000ffffffffffffULL;
420                 uint16_t count = entry >> 48;
421 
422                 if (count == 0) {
423                     continue;
424                 }
425 
426                 /* Got an entry! Submit and exit.  */
427                 iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
428                                               ide_issue_trim_cb, opaque);
429                 return;
430             }
431 
432             iocb->j++;
433             iocb->i = -1;
434         }
435     } else {
436         iocb->ret = ret;
437     }
438 
439     iocb->aiocb = NULL;
440     if (iocb->bh) {
441         qemu_bh_schedule(iocb->bh);
442     }
443 }
444 
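/*
 * DSM TRIM: the guest supplies the ranges to discard as little-endian
 * 64-bit entries in the DMA buffer (48-bit sector address in the low
 * bytes, 16-bit sector count in the high bytes); ide_issue_trim_cb()
 * walks the entries and issues one discard per non-empty range.
 */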
445 BlockAIOCB *ide_issue_trim(BlockBackend *blk,
446         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
447         BlockCompletionFunc *cb, void *opaque)
448 {
449     TrimAIOCB *iocb;
450 
451     iocb = blk_aio_get(&trim_aiocb_info, blk, cb, opaque);
452     iocb->blk = blk;
453     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
454     iocb->ret = 0;
455     iocb->qiov = qiov;
456     iocb->i = -1;
457     iocb->j = 0;
458     ide_issue_trim_cb(iocb, 0);
459     return &iocb->common;
460 }
461 
462 void ide_abort_command(IDEState *s)
463 {
464     ide_transfer_stop(s);
465     s->status = READY_STAT | ERR_STAT;
466     s->error = ABRT_ERR;
467 }
468 
469 /* prepare data transfer and register what to do once it completes */
470 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
471                         EndTransferFunc *end_transfer_func)
472 {
473     s->end_transfer_func = end_transfer_func;
474     s->data_ptr = buf;
475     s->data_end = buf + size;
476     if (!(s->status & ERR_STAT)) {
477         s->status |= DRQ_STAT;
478     }
479     if (s->bus->dma->ops->start_transfer) {
480         s->bus->dma->ops->start_transfer(s->bus->dma);
481     }
482 }
483 
484 static void ide_cmd_done(IDEState *s)
485 {
486     if (s->bus->dma->ops->cmd_done) {
487         s->bus->dma->ops->cmd_done(s->bus->dma);
488     }
489 }
490 
491 static void ide_transfer_halt(IDEState *s,
492                               void(*end_transfer_func)(IDEState *),
493                               bool notify)
494 {
495     s->end_transfer_func = end_transfer_func;
496     s->data_ptr = s->io_buffer;
497     s->data_end = s->io_buffer;
498     s->status &= ~DRQ_STAT;
499     if (notify) {
500         ide_cmd_done(s);
501     }
502 }
503 
504 void ide_transfer_stop(IDEState *s)
505 {
506     ide_transfer_halt(s, ide_transfer_stop, true);
507 }
508 
509 static void ide_transfer_cancel(IDEState *s)
510 {
511     ide_transfer_halt(s, ide_transfer_cancel, false);
512 }
513 
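/*
 * The starting sector is assembled from the task-file registers in one
 * of three ways: CHS when bit 6 (LBA) of the drive/head register is
 * clear, 28-bit LBA otherwise, or 48-bit LBA when the command enabled
 * lba48, in which case the HOB registers provide the upper bytes.
 */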
514 int64_t ide_get_sector(IDEState *s)
515 {
516     int64_t sector_num;
517     if (s->select & 0x40) {
518         /* lba */
519 	if (!s->lba48) {
520 	    sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
521 		(s->lcyl << 8) | s->sector;
522 	} else {
523 	    sector_num = ((int64_t)s->hob_hcyl << 40) |
524 		((int64_t) s->hob_lcyl << 32) |
525 		((int64_t) s->hob_sector << 24) |
526 		((int64_t) s->hcyl << 16) |
527 		((int64_t) s->lcyl << 8) | s->sector;
528 	}
529     } else {
530         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
531             (s->select & 0x0f) * s->sectors + (s->sector - 1);
532     }
533     return sector_num;
534 }
535 
536 void ide_set_sector(IDEState *s, int64_t sector_num)
537 {
538     unsigned int cyl, r;
539     if (s->select & 0x40) {
540 	if (!s->lba48) {
541             s->select = (s->select & 0xf0) | (sector_num >> 24);
542             s->hcyl = (sector_num >> 16);
543             s->lcyl = (sector_num >> 8);
544             s->sector = (sector_num);
545 	} else {
546 	    s->sector = sector_num;
547 	    s->lcyl = sector_num >> 8;
548 	    s->hcyl = sector_num >> 16;
549 	    s->hob_sector = sector_num >> 24;
550 	    s->hob_lcyl = sector_num >> 32;
551 	    s->hob_hcyl = sector_num >> 40;
552 	}
553     } else {
554         cyl = sector_num / (s->heads * s->sectors);
555         r = sector_num % (s->heads * s->sectors);
556         s->hcyl = cyl >> 8;
557         s->lcyl = cyl;
558         s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
559         s->sector = (r % s->sectors) + 1;
560     }
561 }
562 
563 static void ide_rw_error(IDEState *s) {
564     ide_abort_command(s);
565     ide_set_irq(s->bus);
566 }
567 
568 static bool ide_sect_range_ok(IDEState *s,
569                               uint64_t sector, uint64_t nb_sectors)
570 {
571     uint64_t total_sectors;
572 
573     blk_get_geometry(s->blk, &total_sectors);
574     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
575         return false;
576     }
577     return true;
578 }
579 
580 static void ide_buffered_readv_cb(void *opaque, int ret)
581 {
582     IDEBufferedRequest *req = opaque;
583     if (!req->orphaned) {
584         if (!ret) {
585             qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
586                                 req->original_qiov->size);
587         }
588         req->original_cb(req->original_opaque, ret);
589     }
590     QLIST_REMOVE(req, list);
591     qemu_vfree(req->iov.iov_base);
592     g_free(req);
593 }
594 
595 #define MAX_BUFFERED_REQS 16
596 
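/*
 * PIO reads go through a private bounce buffer rather than directly into
 * the caller's qiov: if the request is orphaned by ide_cancel_dma_sync()
 * it can still complete in the background, and ide_buffered_readv_cb()
 * only copies the data and invokes the original callback when the
 * request has not been orphaned.
 */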
597 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
598                                QEMUIOVector *iov, int nb_sectors,
599                                BlockCompletionFunc *cb, void *opaque)
600 {
601     BlockAIOCB *aioreq;
602     IDEBufferedRequest *req;
603     int c = 0;
604 
605     QLIST_FOREACH(req, &s->buffered_requests, list) {
606         c++;
607     }
608     if (c > MAX_BUFFERED_REQS) {
609         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
610     }
611 
612     req = g_new0(IDEBufferedRequest, 1);
613     req->original_qiov = iov;
614     req->original_cb = cb;
615     req->original_opaque = opaque;
616     req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
617     req->iov.iov_len = iov->size;
618     qemu_iovec_init_external(&req->qiov, &req->iov, 1);
619 
620     aioreq = blk_aio_readv(s->blk, sector_num, &req->qiov, nb_sectors,
621                            ide_buffered_readv_cb, req);
622 
623     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
624     return aioreq;
625 }
626 
627 /**
628  * Cancel all pending DMA requests.
629  * Any buffered DMA requests are instantly canceled,
630  * but any pending unbuffered DMA requests must be waited on.
631  */
632 void ide_cancel_dma_sync(IDEState *s)
633 {
634     IDEBufferedRequest *req;
635 
636     /* First invoke the callbacks of all buffered requests
637      * and flag those requests as orphaned. Ideally there
638      * are no unbuffered (Scatter Gather DMA Requests or
639      * write requests) pending and we can avoid to drain. */
640     QLIST_FOREACH(req, &s->buffered_requests, list) {
641         if (!req->orphaned) {
642 #ifdef DEBUG_IDE
643             printf("%s: invoking cb %p of buffered request %p with"
644                    " -ECANCELED\n", __func__, req->original_cb, req);
645 #endif
646             req->original_cb(req->original_opaque, -ECANCELED);
647         }
648         req->orphaned = true;
649     }
650 
651     /*
652      * We can't cancel Scatter Gather DMA in the middle of the
653      * operation or a partial (not full) DMA transfer would reach
654  * the storage, so we wait for completion instead (we behave
655  * as if the DMA had completed by the time the guest tries to
656  * cancel it via bmdma_cmd_writeb with BM_CMD_START not
657  * set).
658      *
659      * In the future we'll be able to safely cancel the I/O if the
660  * whole DMA operation is submitted to disk with a single
661  * aio operation using preadv/pwritev.
662      */
663     if (s->bus->dma->aiocb) {
664 #ifdef DEBUG_IDE
665         printf("%s: draining all remaining requests\n", __func__);
666 #endif
667         blk_drain(s->blk);
668         assert(s->bus->dma->aiocb == NULL);
669     }
670 }
671 
672 static void ide_sector_read(IDEState *s);
673 
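/*
 * PIO read path: ide_sector_read() reads up to req_nb_sectors sectors
 * into io_buffer, and this completion callback then exposes the buffer
 * to the guest via ide_transfer_start(), with ide_sector_read re-entered
 * as the end-transfer function until nsector reaches zero.
 */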
674 static void ide_sector_read_cb(void *opaque, int ret)
675 {
676     IDEState *s = opaque;
677     int n;
678 
679     s->pio_aiocb = NULL;
680     s->status &= ~BUSY_STAT;
681 
682     if (ret == -ECANCELED) {
683         return;
684     }
685     if (ret != 0) {
686         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
687                                 IDE_RETRY_READ)) {
688             return;
689         }
690     }
691 
692     block_acct_done(blk_get_stats(s->blk), &s->acct);
693 
694     n = s->nsector;
695     if (n > s->req_nb_sectors) {
696         n = s->req_nb_sectors;
697     }
698 
699     ide_set_sector(s, ide_get_sector(s) + n);
700     s->nsector -= n;
701     /* Allow the guest to read the io_buffer */
702     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
703     ide_set_irq(s->bus);
704 }
705 
706 static void ide_sector_read(IDEState *s)
707 {
708     int64_t sector_num;
709     int n;
710 
711     s->status = READY_STAT | SEEK_STAT;
712     s->error = 0; /* not needed by IDE spec, but needed by Windows */
713     sector_num = ide_get_sector(s);
714     n = s->nsector;
715 
716     if (n == 0) {
717         ide_transfer_stop(s);
718         return;
719     }
720 
721     s->status |= BUSY_STAT;
722 
723     if (n > s->req_nb_sectors) {
724         n = s->req_nb_sectors;
725     }
726 
727 #if defined(DEBUG_IDE)
728     printf("sector=%" PRId64 "\n", sector_num);
729 #endif
730 
731     if (!ide_sect_range_ok(s, sector_num, n)) {
732         ide_rw_error(s);
733         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
734         return;
735     }
736 
737     s->iov.iov_base = s->io_buffer;
738     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
739     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
740 
741     block_acct_start(blk_get_stats(s->blk), &s->acct,
742                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
743     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
744                                       ide_sector_read_cb, s);
745 }
746 
747 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
748 {
749     if (s->bus->dma->ops->commit_buf) {
750         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
751     }
752     s->io_buffer_offset += tx_bytes;
753     qemu_sglist_destroy(&s->sg);
754 }
755 
756 void ide_set_inactive(IDEState *s, bool more)
757 {
758     s->bus->dma->aiocb = NULL;
759     s->bus->retry_unit = -1;
760     s->bus->retry_sector_num = 0;
761     s->bus->retry_nsector = 0;
762     if (s->bus->dma->ops->set_inactive) {
763         s->bus->dma->ops->set_inactive(s->bus->dma, more);
764     }
765     ide_cmd_done(s);
766 }
767 
768 void ide_dma_error(IDEState *s)
769 {
770     dma_buf_commit(s, 0);
771     ide_abort_command(s);
772     ide_set_inactive(s, false);
773     ide_set_irq(s->bus);
774 }
775 
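/*
 * Returns nonzero if the error consumed the request (either the VM was
 * stopped so the operation can be retried later, or the failure was
 * reported to the guest); a zero return means the 'ignore' policy is in
 * effect and the caller should continue as if the operation succeeded.
 */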
776 static int ide_handle_rw_error(IDEState *s, int error, int op)
777 {
778     bool is_read = (op & IDE_RETRY_READ) != 0;
779     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
780 
781     if (action == BLOCK_ERROR_ACTION_STOP) {
782         assert(s->bus->retry_unit == s->unit);
783         s->bus->error_status = op;
784     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
785         block_acct_failed(blk_get_stats(s->blk), &s->acct);
786         if (op & IDE_RETRY_DMA) {
787             ide_dma_error(s);
788         } else {
789             ide_rw_error(s);
790         }
791     }
792     blk_error_action(s->blk, action, is_read, error);
793     return action != BLOCK_ERROR_ACTION_IGNORE;
794 }
795 
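/*
 * ide_dma_cb() is both the entry point and the completion callback of a
 * DMA transfer: each invocation commits the bytes covered by the previous
 * scatter/gather list, advances the current sector, and either finishes
 * the command or prepares the next PRD-described buffer and resubmits
 * itself.
 */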
796 static void ide_dma_cb(void *opaque, int ret)
797 {
798     IDEState *s = opaque;
799     int n;
800     int64_t sector_num;
801     bool stay_active = false;
802 
803     if (ret == -ECANCELED) {
804         return;
805     }
806     if (ret < 0) {
807         int op = IDE_RETRY_DMA;
808 
809         if (s->dma_cmd == IDE_DMA_READ)
810             op |= IDE_RETRY_READ;
811         else if (s->dma_cmd == IDE_DMA_TRIM)
812             op |= IDE_RETRY_TRIM;
813 
814         if (ide_handle_rw_error(s, -ret, op)) {
815             return;
816         }
817     }
818 
819     n = s->io_buffer_size >> 9;
820     if (n > s->nsector) {
821         /* The PRDs were longer than needed for this request. Shorten them so
822          * we don't get a negative remainder. The Active bit must remain set
823          * after the request completes. */
824         n = s->nsector;
825         stay_active = true;
826     }
827 
828     sector_num = ide_get_sector(s);
829     if (n > 0) {
830         assert(n * 512 == s->sg.size);
831         dma_buf_commit(s, s->sg.size);
832         sector_num += n;
833         ide_set_sector(s, sector_num);
834         s->nsector -= n;
835     }
836 
837     /* end of transfer ? */
838     if (s->nsector == 0) {
839         s->status = READY_STAT | SEEK_STAT;
840         ide_set_irq(s->bus);
841         goto eot;
842     }
843 
844     /* launch next transfer */
845     n = s->nsector;
846     s->io_buffer_index = 0;
847     s->io_buffer_size = n * 512;
848     if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
849         /* The PRDs were too short. Reset the Active bit, but don't raise an
850          * interrupt. */
851         s->status = READY_STAT | SEEK_STAT;
852         dma_buf_commit(s, 0);
853         goto eot;
854     }
855 
856 #ifdef DEBUG_AIO
857     printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, dma_cmd=%d\n",
858            sector_num, n, s->dma_cmd);
859 #endif
860 
861     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
862         !ide_sect_range_ok(s, sector_num, n)) {
863         ide_dma_error(s);
864         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
865         return;
866     }
867 
868     switch (s->dma_cmd) {
869     case IDE_DMA_READ:
870         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num,
871                                           ide_dma_cb, s);
872         break;
873     case IDE_DMA_WRITE:
874         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num,
875                                            ide_dma_cb, s);
876         break;
877     case IDE_DMA_TRIM:
878         s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
879                                         ide_issue_trim, ide_dma_cb, s,
880                                         DMA_DIRECTION_TO_DEVICE);
881         break;
882     }
883     return;
884 
885 eot:
886     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
887         block_acct_done(blk_get_stats(s->blk), &s->acct);
888     }
889     ide_set_inactive(s, stay_active);
890 }
891 
892 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
893 {
894     s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
895     s->io_buffer_size = 0;
896     s->dma_cmd = dma_cmd;
897 
898     switch (dma_cmd) {
899     case IDE_DMA_READ:
900         block_acct_start(blk_get_stats(s->blk), &s->acct,
901                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
902         break;
903     case IDE_DMA_WRITE:
904         block_acct_start(blk_get_stats(s->blk), &s->acct,
905                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
906         break;
907     default:
908         break;
909     }
910 
911     ide_start_dma(s, ide_dma_cb);
912 }
913 
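/*
 * Remember which unit issued the DMA and the starting sector/count so
 * that the command can be re-issued if a request fails under a 'stop'
 * error policy (ide_handle_rw_error() records the retried operation in
 * bus->error_status).
 */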
914 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
915 {
916     s->io_buffer_index = 0;
917     s->bus->retry_unit = s->unit;
918     s->bus->retry_sector_num = ide_get_sector(s);
919     s->bus->retry_nsector = s->nsector;
920     if (s->bus->dma->ops->start_dma) {
921         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
922     }
923 }
924 
925 static void ide_sector_write(IDEState *s);
926 
927 static void ide_sector_write_timer_cb(void *opaque)
928 {
929     IDEState *s = opaque;
930     ide_set_irq(s->bus);
931 }
932 
933 static void ide_sector_write_cb(void *opaque, int ret)
934 {
935     IDEState *s = opaque;
936     int n;
937 
938     if (ret == -ECANCELED) {
939         return;
940     }
941 
942     s->pio_aiocb = NULL;
943     s->status &= ~BUSY_STAT;
944 
945     if (ret != 0) {
946         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
947             return;
948         }
949     }
950 
951     block_acct_done(blk_get_stats(s->blk), &s->acct);
952 
953     n = s->nsector;
954     if (n > s->req_nb_sectors) {
955         n = s->req_nb_sectors;
956     }
957     s->nsector -= n;
958 
959     ide_set_sector(s, ide_get_sector(s) + n);
960     if (s->nsector == 0) {
961         /* no more sectors to write */
962         ide_transfer_stop(s);
963     } else {
964         int n1 = s->nsector;
965         if (n1 > s->req_nb_sectors) {
966             n1 = s->req_nb_sectors;
967         }
968         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
969                            ide_sector_write);
970     }
971 
972     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
973         /* It seems there is a bug in the Windows 2000 installer HDD
974            IDE driver which fills the disk with empty logs when the
975            IDE write IRQ comes too early. This hack tries to correct
976            that at the expense of slower write performance. Use this
977            option _only_ to install Windows 2000. You must disable it
978            for normal use. */
979         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
980                   (NANOSECONDS_PER_SECOND / 1000));
981     } else {
982         ide_set_irq(s->bus);
983     }
984 }
985 
986 static void ide_sector_write(IDEState *s)
987 {
988     int64_t sector_num;
989     int n;
990 
991     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
992     sector_num = ide_get_sector(s);
993 #if defined(DEBUG_IDE)
994     printf("sector=%" PRId64 "\n", sector_num);
995 #endif
996     n = s->nsector;
997     if (n > s->req_nb_sectors) {
998         n = s->req_nb_sectors;
999     }
1000 
1001     if (!ide_sect_range_ok(s, sector_num, n)) {
1002         ide_rw_error(s);
1003         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1004         return;
1005     }
1006 
1007     s->iov.iov_base = s->io_buffer;
1008     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
1009     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1010 
1011     block_acct_start(blk_get_stats(s->blk), &s->acct,
1012                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1013     s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
1014                                   ide_sector_write_cb, s);
1015 }
1016 
1017 static void ide_flush_cb(void *opaque, int ret)
1018 {
1019     IDEState *s = opaque;
1020 
1021     s->pio_aiocb = NULL;
1022 
1023     if (ret == -ECANCELED) {
1024         return;
1025     }
1026     if (ret < 0) {
1027         /* XXX: What sector number to set here? */
1028         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1029             return;
1030         }
1031     }
1032 
1033     if (s->blk) {
1034         block_acct_done(blk_get_stats(s->blk), &s->acct);
1035     }
1036     s->status = READY_STAT | SEEK_STAT;
1037     ide_cmd_done(s);
1038     ide_set_irq(s->bus);
1039 }
1040 
1041 static void ide_flush_cache(IDEState *s)
1042 {
1043     if (s->blk == NULL) {
1044         ide_flush_cb(s, 0);
1045         return;
1046     }
1047 
1048     s->status |= BUSY_STAT;
1049     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1050     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1051 }
1052 
1053 static void ide_cfata_metadata_inquiry(IDEState *s)
1054 {
1055     uint16_t *p;
1056     uint32_t spd;
1057 
1058     p = (uint16_t *) s->io_buffer;
1059     memset(p, 0, 0x200);
1060     spd = ((s->mdata_size - 1) >> 9) + 1;
1061 
1062     put_le16(p + 0, 0x0001);			/* Data format revision */
1063     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1064     put_le16(p + 2, s->media_changed);		/* Media status */
1065     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1066     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1067     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1068     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1069 }
1070 
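/*
 * CFA metadata reads and writes address the metadata area in 512-byte
 * units taken from the cylinder registers ((hcyl << 16) | lcyl); on
 * reads the first word of the returned block reports the media-changed
 * status, and writes skip the corresponding two bytes of io_buffer.
 */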
1071 static void ide_cfata_metadata_read(IDEState *s)
1072 {
1073     uint16_t *p;
1074 
1075     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1076         s->status = ERR_STAT;
1077         s->error = ABRT_ERR;
1078         return;
1079     }
1080 
1081     p = (uint16_t *) s->io_buffer;
1082     memset(p, 0, 0x200);
1083 
1084     put_le16(p + 0, s->media_changed);		/* Media status */
1085     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1086                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1087                                     s->nsector << 9), 0x200 - 2));
1088 }
1089 
1090 static void ide_cfata_metadata_write(IDEState *s)
1091 {
1092     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1093         s->status = ERR_STAT;
1094         s->error = ABRT_ERR;
1095         return;
1096     }
1097 
1098     s->media_changed = 0;
1099 
1100     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1101                     s->io_buffer + 2,
1102                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1103                                     s->nsector << 9), 0x200 - 2));
1104 }
1105 
1106 /* called when the inserted state of the media has changed */
1107 static void ide_cd_change_cb(void *opaque, bool load)
1108 {
1109     IDEState *s = opaque;
1110     uint64_t nb_sectors;
1111 
1112     s->tray_open = !load;
1113     blk_get_geometry(s->blk, &nb_sectors);
1114     s->nb_sectors = nb_sectors;
1115 
1116     /*
1117      * First indicate to the guest that a CD has been removed.  That's
1118      * done on the next command the guest sends us.
1119      *
1120      * Then we set UNIT_ATTENTION, by which the guest will
1121      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1122      */
1123     s->cdrom_changed = 1;
1124     s->events.new_media = true;
1125     s->events.eject_request = false;
1126     ide_set_irq(s->bus);
1127 }
1128 
1129 static void ide_cd_eject_request_cb(void *opaque, bool force)
1130 {
1131     IDEState *s = opaque;
1132 
1133     s->events.eject_request = true;
1134     if (force) {
1135         s->tray_locked = false;
1136     }
1137     ide_set_irq(s->bus);
1138 }
1139 
1140 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1141 {
1142     s->lba48 = lba48;
1143 
1144     /* Handle the 'magic' 0 nsector count conversion here. To avoid
1145      * fiddling with the rest of the read logic, we just store the
1146      * full sector count in ->nsector and ignore ->hob_nsector from now on
1147      */
1148     if (!s->lba48) {
1149 	if (!s->nsector)
1150 	    s->nsector = 256;
1151     } else {
1152 	if (!s->nsector && !s->hob_nsector)
1153 	    s->nsector = 65536;
1154 	else {
1155 	    int lo = s->nsector;
1156 	    int hi = s->hob_nsector;
1157 
1158 	    s->nsector = (hi << 8) | lo;
1159 	}
1160     }
1161 }
1162 
1163 static void ide_clear_hob(IDEBus *bus)
1164 {
1165     /* any write clears HOB high bit of device control register */
1166     bus->ifs[0].select &= ~(1 << 7);
1167     bus->ifs[1].select &= ~(1 << 7);
1168 }
1169 
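/*
 * Command block register writes; addr & 7 selects the register:
 * 0 data, 1 features, 2 sector count, 3 sector number, 4 cylinder low,
 * 5 cylinder high, 6 drive/head, 7 command.
 */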
1170 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1171 {
1172     IDEBus *bus = opaque;
1173 
1174 #ifdef DEBUG_IDE
1175     printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
1176 #endif
1177 
1178     addr &= 7;
1179 
1180     /* ignore writes to command block while busy with previous command */
1181     if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
1182         return;
1183 
1184     switch(addr) {
1185     case 0:
1186         break;
1187     case 1:
1188 	ide_clear_hob(bus);
1189         /* NOTE: the value is written to both drives */
1190 	bus->ifs[0].hob_feature = bus->ifs[0].feature;
1191 	bus->ifs[1].hob_feature = bus->ifs[1].feature;
1192         bus->ifs[0].feature = val;
1193         bus->ifs[1].feature = val;
1194         break;
1195     case 2:
1196 	ide_clear_hob(bus);
1197 	bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1198 	bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1199         bus->ifs[0].nsector = val;
1200         bus->ifs[1].nsector = val;
1201         break;
1202     case 3:
1203 	ide_clear_hob(bus);
1204 	bus->ifs[0].hob_sector = bus->ifs[0].sector;
1205 	bus->ifs[1].hob_sector = bus->ifs[1].sector;
1206         bus->ifs[0].sector = val;
1207         bus->ifs[1].sector = val;
1208         break;
1209     case 4:
1210 	ide_clear_hob(bus);
1211 	bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1212 	bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1213         bus->ifs[0].lcyl = val;
1214         bus->ifs[1].lcyl = val;
1215         break;
1216     case 5:
1217 	ide_clear_hob(bus);
1218 	bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1219 	bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1220         bus->ifs[0].hcyl = val;
1221         bus->ifs[1].hcyl = val;
1222         break;
1223     case 6:
1224 	/* FIXME: HOB readback uses bit 7 */
1225         bus->ifs[0].select = (val & ~0x10) | 0xa0;
1226         bus->ifs[1].select = (val | 0x10) | 0xa0;
1227         /* select drive */
1228         bus->unit = (val >> 4) & 1;
1229         break;
1230     default:
1231     case 7:
1232         /* command */
1233         ide_exec_cmd(bus, val);
1234         break;
1235     }
1236 }
1237 
1238 static void ide_reset(IDEState *s)
1239 {
1240 #ifdef DEBUG_IDE
1241     printf("ide: reset\n");
1242 #endif
1243 
1244     if (s->pio_aiocb) {
1245         blk_aio_cancel(s->pio_aiocb);
1246         s->pio_aiocb = NULL;
1247     }
1248 
1249     if (s->drive_kind == IDE_CFATA)
1250         s->mult_sectors = 0;
1251     else
1252         s->mult_sectors = MAX_MULT_SECTORS;
1253     /* ide regs */
1254     s->feature = 0;
1255     s->error = 0;
1256     s->nsector = 0;
1257     s->sector = 0;
1258     s->lcyl = 0;
1259     s->hcyl = 0;
1260 
1261     /* lba48 */
1262     s->hob_feature = 0;
1263     s->hob_sector = 0;
1264     s->hob_nsector = 0;
1265     s->hob_lcyl = 0;
1266     s->hob_hcyl = 0;
1267 
1268     s->select = 0xa0;
1269     s->status = READY_STAT | SEEK_STAT;
1270 
1271     s->lba48 = 0;
1272 
1273     /* ATAPI specific */
1274     s->sense_key = 0;
1275     s->asc = 0;
1276     s->cdrom_changed = 0;
1277     s->packet_transfer_size = 0;
1278     s->elementary_transfer_size = 0;
1279     s->io_buffer_index = 0;
1280     s->cd_sector_size = 0;
1281     s->atapi_dma = 0;
1282     s->tray_locked = 0;
1283     s->tray_open = 0;
1284     /* ATA DMA state */
1285     s->io_buffer_size = 0;
1286     s->req_nb_sectors = 0;
1287 
1288     ide_set_signature(s);
1289     /* init the transfer handler so that 0xffff is returned on data
1290        accesses */
1291     s->end_transfer_func = ide_dummy_transfer_stop;
1292     ide_dummy_transfer_stop(s);
1293     s->media_changed = 0;
1294 }
1295 
1296 static bool cmd_nop(IDEState *s, uint8_t cmd)
1297 {
1298     return true;
1299 }
1300 
1301 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1302 {
1303     /* Halt PIO (in the DRQ phase), then DMA */
1304     ide_transfer_cancel(s);
1305     ide_cancel_dma_sync(s);
1306 
1307     /* Reset any PIO commands, reset signature, etc */
1308     ide_reset(s);
1309 
1310     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1311      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1312     s->status = 0x00;
1313 
1314     /* Do not overwrite status register */
1315     return false;
1316 }
1317 
1318 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1319 {
1320     switch (s->feature) {
1321     case DSM_TRIM:
1322         if (s->blk) {
1323             ide_sector_start_dma(s, IDE_DMA_TRIM);
1324             return false;
1325         }
1326         break;
1327     }
1328 
1329     ide_abort_command(s);
1330     return true;
1331 }
1332 
1333 static bool cmd_identify(IDEState *s, uint8_t cmd)
1334 {
1335     if (s->blk && s->drive_kind != IDE_CD) {
1336         if (s->drive_kind != IDE_CFATA) {
1337             ide_identify(s);
1338         } else {
1339             ide_cfata_identify(s);
1340         }
1341         s->status = READY_STAT | SEEK_STAT;
1342         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1343         ide_set_irq(s->bus);
1344         return false;
1345     } else {
1346         if (s->drive_kind == IDE_CD) {
1347             ide_set_signature(s);
1348         }
1349         ide_abort_command(s);
1350     }
1351 
1352     return true;
1353 }
1354 
1355 static bool cmd_verify(IDEState *s, uint8_t cmd)
1356 {
1357     bool lba48 = (cmd == WIN_VERIFY_EXT);
1358 
1359     /* do sector number check ? */
1360     ide_cmd_lba48_transform(s, lba48);
1361 
1362     return true;
1363 }
1364 
1365 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1366 {
1367     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1368         /* Disable Read and Write Multiple */
1369         s->mult_sectors = 0;
1370     } else if ((s->nsector & 0xff) != 0 &&
1371         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1372          (s->nsector & (s->nsector - 1)) != 0)) {
1373         ide_abort_command(s);
1374     } else {
1375         s->mult_sectors = s->nsector & 0xff;
1376     }
1377 
1378     return true;
1379 }
1380 
1381 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1382 {
1383     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1384 
1385     if (!s->blk || !s->mult_sectors) {
1386         ide_abort_command(s);
1387         return true;
1388     }
1389 
1390     ide_cmd_lba48_transform(s, lba48);
1391     s->req_nb_sectors = s->mult_sectors;
1392     ide_sector_read(s);
1393     return false;
1394 }
1395 
1396 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1397 {
1398     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1399     int n;
1400 
1401     if (!s->blk || !s->mult_sectors) {
1402         ide_abort_command(s);
1403         return true;
1404     }
1405 
1406     ide_cmd_lba48_transform(s, lba48);
1407 
1408     s->req_nb_sectors = s->mult_sectors;
1409     n = MIN(s->nsector, s->req_nb_sectors);
1410 
1411     s->status = SEEK_STAT | READY_STAT;
1412     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1413 
1414     s->media_changed = 1;
1415 
1416     return false;
1417 }
1418 
1419 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1420 {
1421     bool lba48 = (cmd == WIN_READ_EXT);
1422 
1423     if (s->drive_kind == IDE_CD) {
1424         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1425         ide_abort_command(s);
1426         return true;
1427     }
1428 
1429     if (!s->blk) {
1430         ide_abort_command(s);
1431         return true;
1432     }
1433 
1434     ide_cmd_lba48_transform(s, lba48);
1435     s->req_nb_sectors = 1;
1436     ide_sector_read(s);
1437 
1438     return false;
1439 }
1440 
1441 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1442 {
1443     bool lba48 = (cmd == WIN_WRITE_EXT);
1444 
1445     if (!s->blk) {
1446         ide_abort_command(s);
1447         return true;
1448     }
1449 
1450     ide_cmd_lba48_transform(s, lba48);
1451 
1452     s->req_nb_sectors = 1;
1453     s->status = SEEK_STAT | READY_STAT;
1454     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1455 
1456     s->media_changed = 1;
1457 
1458     return false;
1459 }
1460 
1461 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1462 {
1463     bool lba48 = (cmd == WIN_READDMA_EXT);
1464 
1465     if (!s->blk) {
1466         ide_abort_command(s);
1467         return true;
1468     }
1469 
1470     ide_cmd_lba48_transform(s, lba48);
1471     ide_sector_start_dma(s, IDE_DMA_READ);
1472 
1473     return false;
1474 }
1475 
1476 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1477 {
1478     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1479 
1480     if (!s->blk) {
1481         ide_abort_command(s);
1482         return true;
1483     }
1484 
1485     ide_cmd_lba48_transform(s, lba48);
1486     ide_sector_start_dma(s, IDE_DMA_WRITE);
1487 
1488     s->media_changed = 1;
1489 
1490     return false;
1491 }
1492 
1493 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1494 {
1495     ide_flush_cache(s);
1496     return false;
1497 }
1498 
1499 static bool cmd_seek(IDEState *s, uint8_t cmd)
1500 {
1501     /* XXX: Check that seek is within bounds */
1502     return true;
1503 }
1504 
1505 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1506 {
1507     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1508 
1509     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1510     if (s->nb_sectors == 0) {
1511         ide_abort_command(s);
1512         return true;
1513     }
1514 
1515     ide_cmd_lba48_transform(s, lba48);
1516     ide_set_sector(s, s->nb_sectors - 1);
1517 
1518     return true;
1519 }
1520 
1521 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1522 {
1523     s->nsector = 0xff; /* device active or idle */
1524     return true;
1525 }
1526 
1527 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1528 {
1529     uint16_t *identify_data;
1530 
1531     if (!s->blk) {
1532         ide_abort_command(s);
1533         return true;
1534     }
1535 
1536     /* XXX: valid for CDROM ? */
1537     switch (s->feature) {
1538     case 0x02: /* write cache enable */
1539         blk_set_enable_write_cache(s->blk, true);
1540         identify_data = (uint16_t *)s->identify_data;
1541         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1542         return true;
1543     case 0x82: /* write cache disable */
1544         blk_set_enable_write_cache(s->blk, false);
1545         identify_data = (uint16_t *)s->identify_data;
1546         put_le16(identify_data + 85, (1 << 14) | 1);
1547         ide_flush_cache(s);
1548         return false;
1549     case 0xcc: /* reverting to power-on defaults enable */
1550     case 0x66: /* reverting to power-on defaults disable */
1551     case 0xaa: /* read look-ahead enable */
1552     case 0x55: /* read look-ahead disable */
1553     case 0x05: /* set advanced power management mode */
1554     case 0x85: /* disable advanced power management mode */
1555     case 0x69: /* NOP */
1556     case 0x67: /* NOP */
1557     case 0x96: /* NOP */
1558     case 0x9a: /* NOP */
1559     case 0x42: /* enable Automatic Acoustic Mode */
1560     case 0xc2: /* disable Automatic Acoustic Mode */
1561         return true;
1562     case 0x03: /* set transfer mode */
1563         {
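            /*
             * The requested mode is encoded in the sector count register:
             * the low three bits give the mode number and the upper bits
             * the class (PIO, single-word DMA, multiword DMA or UDMA).
             * IDENTIFY words 62/63/88 are rewritten so the guest sees the
             * currently selected transfer mode.
             */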
1564             uint8_t val = s->nsector & 0x07;
1565             identify_data = (uint16_t *)s->identify_data;
1566 
1567             switch (s->nsector >> 3) {
1568             case 0x00: /* pio default */
1569             case 0x01: /* pio mode */
1570                 put_le16(identify_data + 62, 0x07);
1571                 put_le16(identify_data + 63, 0x07);
1572                 put_le16(identify_data + 88, 0x3f);
1573                 break;
1574             case 0x02: /* single word dma mode */
1575                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1576                 put_le16(identify_data + 63, 0x07);
1577                 put_le16(identify_data + 88, 0x3f);
1578                 break;
1579             case 0x04: /* mdma mode */
1580                 put_le16(identify_data + 62, 0x07);
1581                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1582                 put_le16(identify_data + 88, 0x3f);
1583                 break;
1584             case 0x08: /* udma mode */
1585                 put_le16(identify_data + 62, 0x07);
1586                 put_le16(identify_data + 63, 0x07);
1587                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1588                 break;
1589             default:
1590                 goto abort_cmd;
1591             }
1592             return true;
1593         }
1594     }
1595 
1596 abort_cmd:
1597     ide_abort_command(s);
1598     return true;
1599 }
1600 
1601 
1602 /*** ATAPI commands ***/
1603 
1604 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1605 {
1606     ide_atapi_identify(s);
1607     s->status = READY_STAT | SEEK_STAT;
1608     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1609     ide_set_irq(s->bus);
1610     return false;
1611 }
1612 
1613 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1614 {
1615     ide_set_signature(s);
1616 
1617     if (s->drive_kind == IDE_CD) {
1618         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1619                         * devices to return a clear status register
1620                         * with READY_STAT *not* set. */
1621         s->error = 0x01;
1622     } else {
1623         s->status = READY_STAT | SEEK_STAT;
1624         /* The bits of the error register are not as usual for this command!
1625          * They are part of the regular output (this is why ERR_STAT isn't set)
1626          * Device 0 passed, Device 1 passed or not present. */
1627         s->error = 0x01;
1628         ide_set_irq(s->bus);
1629     }
1630 
1631     return false;
1632 }
1633 
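/*
 * PACKET: the guest first transfers the ATAPI_PACKET_SIZE command packet
 * by PIO into io_buffer and ide_atapi_cmd() then executes it; bit 0 of
 * the features register selects DMA for the data phase that follows.
 */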
1634 static bool cmd_packet(IDEState *s, uint8_t cmd)
1635 {
1636     /* overlapping commands not supported */
1637     if (s->feature & 0x02) {
1638         ide_abort_command(s);
1639         return true;
1640     }
1641 
1642     s->status = READY_STAT | SEEK_STAT;
1643     s->atapi_dma = s->feature & 1;
1644     s->nsector = 1;
1645     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1646                        ide_atapi_cmd);
1647     return false;
1648 }
1649 
1650 
1651 /*** CF-ATA commands ***/
1652 
1653 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1654 {
1655     s->error = 0x09;    /* miscellaneous error */
1656     s->status = READY_STAT | SEEK_STAT;
1657     ide_set_irq(s->bus);
1658 
1659     return false;
1660 }
1661 
1662 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1663 {
1664     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1665      * required for Windows 8 to work with AHCI */
1666 
1667     if (cmd == CFA_WEAR_LEVEL) {
1668         s->nsector = 0;
1669     }
1670 
1671     if (cmd == CFA_ERASE_SECTORS) {
1672         s->media_changed = 1;
1673     }
1674 
1675     return true;
1676 }
1677 
1678 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1679 {
1680     s->status = READY_STAT | SEEK_STAT;
1681 
1682     memset(s->io_buffer, 0, 0x200);
1683     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1684     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1685     s->io_buffer[0x02] = s->select;                 /* Head */
1686     s->io_buffer[0x03] = s->sector;                 /* Sector */
1687     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1688     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1689     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1690     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1691     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1692     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1693     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1694 
1695     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1696     ide_set_irq(s->bus);
1697 
1698     return false;
1699 }
1700 
1701 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1702 {
1703     switch (s->feature) {
1704     case 0x02:  /* Inquiry Metadata Storage */
1705         ide_cfata_metadata_inquiry(s);
1706         break;
1707     case 0x03:  /* Read Metadata Storage */
1708         ide_cfata_metadata_read(s);
1709         break;
1710     case 0x04:  /* Write Metadata Storage */
1711         ide_cfata_metadata_write(s);
1712         break;
1713     default:
1714         ide_abort_command(s);
1715         return true;
1716     }
1717 
1718     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1719     s->status = 0x00; /* NOTE: READY is _not_ set */
1720     ide_set_irq(s->bus);
1721 
1722     return false;
1723 }
1724 
1725 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1726 {
1727     switch (s->feature) {
1728     case 0x01:  /* sense temperature in device */
1729         s->nsector = 0x50;      /* +20 C */
1730         break;
1731     default:
1732         ide_abort_command(s);
1733         return true;
1734     }
1735 
1736     return true;
1737 }
1738 
1739 
1740 /*** SMART commands ***/
1741 
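/*
 * B0h SMART: the subcommand is taken from the Feature register and is
 * only honoured when the "key" value C24Fh is present in LCYL/HCYL.
 * Data-returning subcommands build a 512-byte structure in io_buffer
 * whose last byte is a two's-complement checksum of the first 511.
 */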
1742 static bool cmd_smart(IDEState *s, uint8_t cmd)
1743 {
1744     int n;
1745 
1746     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1747         goto abort_cmd;
1748     }
1749 
1750     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1751         goto abort_cmd;
1752     }
1753 
1754     switch (s->feature) {
1755     case SMART_DISABLE:
1756         s->smart_enabled = 0;
1757         return true;
1758 
1759     case SMART_ENABLE:
1760         s->smart_enabled = 1;
1761         return true;
1762 
1763     case SMART_ATTR_AUTOSAVE:
1764         switch (s->sector) {
1765         case 0x00:
1766             s->smart_autosave = 0;
1767             break;
1768         case 0xf1:
1769             s->smart_autosave = 1;
1770             break;
1771         default:
1772             goto abort_cmd;
1773         }
1774         return true;
1775 
1776     case SMART_STATUS:
1777         if (!s->smart_errors) {
1778             s->hcyl = 0xc2;
1779             s->lcyl = 0x4f;
1780         } else {
1781             s->hcyl = 0x2c;
1782             s->lcyl = 0xf4;
1783         }
1784         return true;
1785 
1786     case SMART_READ_THRESH:
1787         memset(s->io_buffer, 0, 0x200);
1788         s->io_buffer[0] = 0x01; /* smart struct version */
1789 
1790         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1791             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1792             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1793         }
1794 
1795         /* checksum */
1796         for (n = 0; n < 511; n++) {
1797             s->io_buffer[511] += s->io_buffer[n];
1798         }
1799         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1800 
1801         s->status = READY_STAT | SEEK_STAT;
1802         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1803         ide_set_irq(s->bus);
1804         return false;
1805 
1806     case SMART_READ_DATA:
1807         memset(s->io_buffer, 0, 0x200);
1808         s->io_buffer[0] = 0x01; /* smart struct version */
1809 
1810         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1811             int i;
1812             for (i = 0; i < 11; i++) {
1813                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1814             }
1815         }
1816 
1817         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1818         if (s->smart_selftest_count == 0) {
1819             s->io_buffer[363] = 0;
1820         } else {
1821             s->io_buffer[363] =
1822                 s->smart_selftest_data[3 +
1823                            (s->smart_selftest_count - 1) *
1824                            24];
1825         }
1826         s->io_buffer[364] = 0x20;
1827         s->io_buffer[365] = 0x01;
1828         /* offline data collection capability: execute + self-test */

1829         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1830         s->io_buffer[368] = 0x03; /* smart capability (1) */
1831         s->io_buffer[369] = 0x00; /* smart capability (2) */
1832         s->io_buffer[370] = 0x01; /* error logging supported */
1833         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1834         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1835         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1836 
1837         for (n = 0; n < 511; n++) {
1838             s->io_buffer[511] += s->io_buffer[n];
1839         }
1840         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1841 
1842         s->status = READY_STAT | SEEK_STAT;
1843         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1844         ide_set_irq(s->bus);
1845         return false;
1846 
1847     case SMART_READ_LOG:
1848         switch (s->sector) {
1849         case 0x01: /* summary smart error log */
1850             memset(s->io_buffer, 0, 0x200);
1851             s->io_buffer[0] = 0x01;
1852             s->io_buffer[1] = 0x00; /* no error entries */
1853             s->io_buffer[452] = s->smart_errors & 0xff;
1854             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1855 
1856             for (n = 0; n < 511; n++) {
1857                 s->io_buffer[511] += s->io_buffer[n];
1858             }
1859             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1860             break;
1861         case 0x06: /* smart self test log */
1862             memset(s->io_buffer, 0, 0x200);
1863             s->io_buffer[0] = 0x01;
1864             if (s->smart_selftest_count == 0) {
1865                 s->io_buffer[508] = 0;
1866             } else {
1867                 s->io_buffer[508] = s->smart_selftest_count;
1868                 for (n = 2; n < 506; n++)  {
1869                     s->io_buffer[n] = s->smart_selftest_data[n];
1870                 }
1871             }
1872 
1873             for (n = 0; n < 511; n++) {
1874                 s->io_buffer[511] += s->io_buffer[n];
1875             }
1876             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1877             break;
1878         default:
1879             goto abort_cmd;
1880         }
1881         s->status = READY_STAT | SEEK_STAT;
1882         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1883         ide_set_irq(s->bus);
1884         return false;
1885 
1886     case SMART_EXECUTE_OFFLINE:
1887         switch (s->sector) {
1888         case 0: /* off-line routine */
1889         case 1: /* short self test */
1890         case 2: /* extended self test */
1891             s->smart_selftest_count++;
1892             if (s->smart_selftest_count > 21) {
1893                 s->smart_selftest_count = 1;
1894             }
1895             n = 2 + (s->smart_selftest_count - 1) * 24;
1896             s->smart_selftest_data[n] = s->sector;
1897             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1898             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1899             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1900             break;
1901         default:
1902             goto abort_cmd;
1903         }
1904         return true;
1905     }
1906 
1907 abort_cmd:
1908     ide_abort_command(s);
1909     return true;
1910 }
1911 
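/*
 * Per-command dispatch flags: bit (1u << drive_kind) marks a command as
 * valid for that drive type; ide_cmd_permitted() checks them before the
 * handler from ide_cmd_table[] is invoked.
 */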
1912 #define HD_OK (1u << IDE_HD)
1913 #define CD_OK (1u << IDE_CD)
1914 #define CFA_OK (1u << IDE_CFATA)
1915 #define HD_CFA_OK (HD_OK | CFA_OK)
1916 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1917 
1918 /* Set the Disk Seek Completed status bit during completion */
1919 #define SET_DSC (1u << 8)
1920 
1921 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1922 static const struct {
1923     /* Returns true if the completion code should be run */
1924     bool (*handler)(IDEState *s, uint8_t cmd);
1925     int flags;
1926 } ide_cmd_table[0x100] = {
1927     /* NOP not implemented, mandatory for CD */
1928     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1929     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
1930     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
1931     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC },
1932     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
1933     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
1934     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
1935     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
1936     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1937     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
1938     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
1939     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
1940     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
1941     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
1942     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
1943     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
1944     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
1945     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
1946     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
1947     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
1948     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
1949     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
1950     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
1951     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
1952     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
1953     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
1954     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
1955     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
1956     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1957     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
1958     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
1959     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
1960     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
1961     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
1962     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
1963     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
1964     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
1965     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
1966     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
1967     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
1968     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
1969     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
1970     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
1971     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
1972     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
1973     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
1974     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
1975     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1976     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
1977     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
1978     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
1979     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
1980     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
1981     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
1982     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
1983     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1984 };
1985 
1986 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
1987 {
1988     return cmd < ARRAY_SIZE(ide_cmd_table)
1989         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
1990 }
1991 
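/*
 * Main command register write handler.  Rejects commands that are not
 * permitted for the selected drive, marks the drive busy, runs the
 * handler from ide_cmd_table[] and, if the handler reports synchronous
 * completion, clears BSY, sets DSC when requested, calls ide_cmd_done()
 * and raises the IRQ.
 */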
1992 void ide_exec_cmd(IDEBus *bus, uint32_t val)
1993 {
1994     IDEState *s;
1995     bool complete;
1996 
1997 #if defined(DEBUG_IDE)
1998     printf("ide: CMD=%02x\n", val);
1999 #endif
2000     s = idebus_active_if(bus);
2001     /* ignore commands to a non-existent slave */
2002     if (s != bus->ifs && !s->blk) {
2003         return;
2004     }
2005 
2006     /* Only RESET is allowed while BSY and/or DRQ are set,
2007      * and only to ATAPI devices. */
2008     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2009         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2010             return;
2011         }
2012     }
2013 
2014     if (!ide_cmd_permitted(s, val)) {
2015         ide_abort_command(s);
2016         ide_set_irq(s->bus);
2017         return;
2018     }
2019 
2020     s->status = READY_STAT | BUSY_STAT;
2021     s->error = 0;
2022     s->io_buffer_offset = 0;
2023 
2024     complete = ide_cmd_table[val].handler(s, val);
2025     if (complete) {
2026         s->status &= ~BUSY_STAT;
2027         assert(!!s->error == !!(s->status & ERR_STAT));
2028 
2029         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2030             s->status |= SEEK_STAT;
2031         }
2032 
2033         ide_cmd_done(s);
2034         ide_set_irq(s->bus);
2035     }
2036 }
2037 
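/*
 * Task-file register reads (command block, offsets 0-7).  Offset 7 is
 * the Status register; reading it has the side effect of de-asserting
 * the IRQ line.  The commented-out HOB path would return the previous
 * (high order byte) register contents for LBA48, but readback is
 * currently disabled because bit 7 of the device register is always set.
 */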
2038 uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
2039 {
2040     IDEBus *bus = opaque;
2041     IDEState *s = idebus_active_if(bus);
2042     uint32_t addr;
2043     int ret, hob;
2044 
2045     addr = addr1 & 7;
2046     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2047     //hob = s->select & (1 << 7);
2048     hob = 0;
2049     switch (addr) {
2050     case 0:
2051         ret = 0xff;
2052         break;
2053     case 1:
2054         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2055             (s != bus->ifs && !s->blk)) {
2056             ret = 0;
2057         } else if (!hob) {
2058             ret = s->error;
2059         } else {
2060             ret = s->hob_feature;
2061         }
2062         break;
2063     case 2:
2064         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2065             ret = 0;
2066         } else if (!hob) {
2067             ret = s->nsector & 0xff;
2068         } else {
2069             ret = s->hob_nsector;
2070         }
2071         break;
2072     case 3:
2073         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2074             ret = 0;
2075         } else if (!hob) {
2076             ret = s->sector;
2077         } else {
2078             ret = s->hob_sector;
2079         }
2080         break;
2081     case 4:
2082         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2083             ret = 0;
2084         } else if (!hob) {
2085             ret = s->lcyl;
2086         } else {
2087             ret = s->hob_lcyl;
2088         }
2089         break;
2090     case 5:
2091         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2092             ret = 0;
2093         } else if (!hob) {
2094             ret = s->hcyl;
2095         } else {
2096             ret = s->hob_hcyl;
2097         }
2098         break;
2099     case 6:
2100         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2101             ret = 0;
2102         } else {
2103             ret = s->select;
2104         }
2105         break;
2106     default:
2107     case 7:
2108         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2109             (s != bus->ifs && !s->blk)) {
2110             ret = 0;
2111         } else {
2112             ret = s->status;
2113         }
2114         qemu_irq_lower(bus->irq);
2115         break;
2116     }
2117 #ifdef DEBUG_IDE
2118     printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
2119 #endif
2120     return ret;
2121 }
2122 
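/*
 * Alternate Status register (control block): returns the same value as
 * the Status register but, unlike offset 7 in ide_ioport_read(), does
 * not touch the IRQ line.
 */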
2123 uint32_t ide_status_read(void *opaque, uint32_t addr)
2124 {
2125     IDEBus *bus = opaque;
2126     IDEState *s = idebus_active_if(bus);
2127     int ret;
2128 
2129     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2130         (s != bus->ifs && !s->blk)) {
2131         ret = 0;
2132     } else {
2133         ret = s->status;
2134     }
2135 #ifdef DEBUG_IDE
2136     printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
2137 #endif
2138     return ret;
2139 }
2140 
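/*
 * Device Control register write: a 0->1 transition of SRST puts both
 * drives into the busy/reset state, a 1->0 transition completes the
 * reset and re-establishes the drive signatures.
 */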
2141 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2142 {
2143     IDEBus *bus = opaque;
2144     IDEState *s;
2145     int i;
2146 
2147 #ifdef DEBUG_IDE
2148     printf("ide: write control addr=0x%x val=%02x\n", addr, val);
2149 #endif
2150     /* common for both drives */
2151     if (!(bus->cmd & IDE_CMD_RESET) &&
2152         (val & IDE_CMD_RESET)) {
2153         /* reset low to high */
2154         for (i = 0; i < 2; i++) {
2155             s = &bus->ifs[i];
2156             s->status = BUSY_STAT | SEEK_STAT;
2157             s->error = 0x01;
2158         }
2159     } else if ((bus->cmd & IDE_CMD_RESET) &&
2160                !(val & IDE_CMD_RESET)) {
2161         /* high to low */
2162         for (i = 0; i < 2; i++) {
2163             s = &bus->ifs[i];
2164             if (s->drive_kind == IDE_CD)
2165                 s->status = 0x00; /* NOTE: READY is _not_ set */
2166             else
2167                 s->status = READY_STAT | SEEK_STAT;
2168             ide_set_signature(s);
2169         }
2170     }
2171 
2172     bus->cmd = val;
2173 }
2174 
2175 /*
2176  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2177  * transferred from the device to the guest), false if it's a PIO in
2178  */
2179 static bool ide_is_pio_out(IDEState *s)
2180 {
2181     if (s->end_transfer_func == ide_sector_write ||
2182         s->end_transfer_func == ide_atapi_cmd) {
2183         return false;
2184     } else if (s->end_transfer_func == ide_sector_read ||
2185                s->end_transfer_func == ide_transfer_stop ||
2186                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2187                s->end_transfer_func == ide_dummy_transfer_stop) {
2188         return true;
2189     }
2190 
2191     abort();
2192 }
2193 
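/*
 * PIO data port accessors (16- and 32-bit).  They simply walk data_ptr
 * towards data_end and call end_transfer_func once the current block
 * has been fully transferred; accesses while DRQ is clear, or in the
 * wrong direction for the running transfer, are ignored.
 */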
2194 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2195 {
2196     IDEBus *bus = opaque;
2197     IDEState *s = idebus_active_if(bus);
2198     uint8_t *p;
2199 
2200     /* PIO data access allowed only when DRQ bit is set. The result of a write
2201      * during PIO out is indeterminate, just ignore it. */
2202     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2203         return;
2204     }
2205 
2206     p = s->data_ptr;
2207     if (p + 2 > s->data_end) {
2208         return;
2209     }
2210 
2211     *(uint16_t *)p = le16_to_cpu(val);
2212     p += 2;
2213     s->data_ptr = p;
2214     if (p >= s->data_end) {
2215         s->status &= ~DRQ_STAT;
2216         s->end_transfer_func(s);
2217     }
2218 }
2219 
2220 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2221 {
2222     IDEBus *bus = opaque;
2223     IDEState *s = idebus_active_if(bus);
2224     uint8_t *p;
2225     int ret;
2226 
2227     /* PIO data access allowed only when DRQ bit is set. The result of a read
2228      * during PIO in is indeterminate, return 0 and don't move forward. */
2229     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2230         return 0;
2231     }
2232 
2233     p = s->data_ptr;
2234     if (p + 2 > s->data_end) {
2235         return 0;
2236     }
2237 
2238     ret = cpu_to_le16(*(uint16_t *)p);
2239     p += 2;
2240     s->data_ptr = p;
2241     if (p >= s->data_end) {
2242         s->status &= ~DRQ_STAT;
2243         s->end_transfer_func(s);
2244     }
2245     return ret;
2246 }
2247 
2248 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2249 {
2250     IDEBus *bus = opaque;
2251     IDEState *s = idebus_active_if(bus);
2252     uint8_t *p;
2253 
2254     /* PIO data access allowed only when DRQ bit is set. The result of a write
2255      * during PIO out is indeterminate, just ignore it. */
2256     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2257         return;
2258     }
2259 
2260     p = s->data_ptr;
2261     if (p + 4 > s->data_end) {
2262         return;
2263     }
2264 
2265     *(uint32_t *)p = le32_to_cpu(val);
2266     p += 4;
2267     s->data_ptr = p;
2268     if (p >= s->data_end) {
2269         s->status &= ~DRQ_STAT;
2270         s->end_transfer_func(s);
2271     }
2272 }
2273 
2274 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2275 {
2276     IDEBus *bus = opaque;
2277     IDEState *s = idebus_active_if(bus);
2278     uint8_t *p;
2279     int ret;
2280 
2281     /* PIO data access allowed only when DRQ bit is set. The result of a read
2282      * during PIO in is indeterminate, return 0 and don't move forward. */
2283     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2284         return 0;
2285     }
2286 
2287     p = s->data_ptr;
2288     if (p + 4 > s->data_end) {
2289         return 0;
2290     }
2291 
2292     ret = cpu_to_le32(*(uint32_t *)p);
2293     p += 4;
2294     s->data_ptr = p;
2295     if (p >= s->data_end) {
2296         s->status &= ~DRQ_STAT;
2297         s->end_transfer_func(s);
2298     }
2299     return ret;
2300 }
2301 
2302 static void ide_dummy_transfer_stop(IDEState *s)
2303 {
2304     s->data_ptr = s->io_buffer;
2305     s->data_end = s->io_buffer;
2306     s->io_buffer[0] = 0xff;
2307     s->io_buffer[1] = 0xff;
2308     s->io_buffer[2] = 0xff;
2309     s->io_buffer[3] = 0xff;
2310 }
2311 
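/*
 * Full bus reset: resets both drives, cancels any DMA request that is
 * still in flight and lets the DMA provider reset its own state.
 */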
2312 void ide_bus_reset(IDEBus *bus)
2313 {
2314     bus->unit = 0;
2315     bus->cmd = 0;
2316     ide_reset(&bus->ifs[0]);
2317     ide_reset(&bus->ifs[1]);
2318     ide_clear_hob(bus);
2319 
2320     /* pending async DMA */
2321     if (bus->dma->aiocb) {
2322 #ifdef DEBUG_AIO
2323         printf("aio_cancel\n");
2324 #endif
2325         blk_aio_cancel(bus->dma->aiocb);
2326         bus->dma->aiocb = NULL;
2327     }
2328 
2329     /* reset dma provider too */
2330     if (bus->dma->ops->reset) {
2331         bus->dma->ops->reset(bus->dma);
2332     }
2333 }
2334 
2335 static bool ide_cd_is_tray_open(void *opaque)
2336 {
2337     return ((IDEState *)opaque)->tray_open;
2338 }
2339 
2340 static bool ide_cd_is_medium_locked(void *opaque)
2341 {
2342     return ((IDEState *)opaque)->tray_locked;
2343 }
2344 
2345 static void ide_resize_cb(void *opaque)
2346 {
2347     IDEState *s = opaque;
2348     uint64_t nb_sectors;
2349 
2350     if (!s->identify_set) {
2351         return;
2352     }
2353 
2354     blk_get_geometry(s->blk, &nb_sectors);
2355     s->nb_sectors = nb_sectors;
2356 
2357     /* Update the identify data buffer. */
2358     if (s->drive_kind == IDE_CFATA) {
2359         ide_cfata_identify_size(s);
2360     } else {
2361         /* IDE_CD uses a different set of callbacks entirely. */
2362         assert(s->drive_kind != IDE_CD);
2363         ide_identify_size(s);
2364     }
2365 }
2366 
2367 static const BlockDevOps ide_cd_block_ops = {
2368     .change_media_cb = ide_cd_change_cb,
2369     .eject_request_cb = ide_cd_eject_request_cb,
2370     .is_tray_open = ide_cd_is_tray_open,
2371     .is_medium_locked = ide_cd_is_medium_locked,
2372 };
2373 
2374 static const BlockDevOps ide_hd_block_ops = {
2375     .resize_cb = ide_resize_cb,
2376 };
2377 
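/*
 * Attach a block backend to an IDEState and fill in the drive
 * parameters (geometry, serial, model, firmware version).  Returns 0 on
 * success or -1 if the backing device is unsuitable (empty or read-only
 * media for a disk/CF device).
 */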
2378 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2379                    const char *version, const char *serial, const char *model,
2380                    uint64_t wwn,
2381                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2382                    int chs_trans)
2383 {
2384     uint64_t nb_sectors;
2385 
2386     s->blk = blk;
2387     s->drive_kind = kind;
2388 
2389     blk_get_geometry(blk, &nb_sectors);
2390     s->cylinders = cylinders;
2391     s->heads = heads;
2392     s->sectors = secs;
2393     s->chs_trans = chs_trans;
2394     s->nb_sectors = nb_sectors;
2395     s->wwn = wwn;
2396     /* The SMART values should be preserved across power cycles
2397        but they aren't.  */
2398     s->smart_enabled = 1;
2399     s->smart_autosave = 1;
2400     s->smart_errors = 0;
2401     s->smart_selftest_count = 0;
2402     if (kind == IDE_CD) {
2403         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2404         blk_set_guest_block_size(blk, 2048);
2405     } else {
2406         if (!blk_is_inserted(s->blk)) {
2407             error_report("Device needs media, but drive is empty");
2408             return -1;
2409         }
2410         if (blk_is_read_only(blk)) {
2411             error_report("Can't use a read-only drive");
2412             return -1;
2413         }
2414         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2415     }
2416     if (serial) {
2417         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2418     } else {
2419         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2420                  "QM%05d", s->drive_serial);
2421     }
2422     if (model) {
2423         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2424     } else {
2425         switch (kind) {
2426         case IDE_CD:
2427             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2428             break;
2429         case IDE_CFATA:
2430             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2431             break;
2432         default:
2433             strcpy(s->drive_model_str, "QEMU HARDDISK");
2434             break;
2435         }
2436     }
2437 
2438     if (version) {
2439         pstrcpy(s->version, sizeof(s->version), version);
2440     } else {
2441         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2442     }
2443 
2444     ide_reset(s);
2445     blk_iostatus_enable(blk);
2446     return 0;
2447 }
2448 
2449 static void ide_init1(IDEBus *bus, int unit)
2450 {
2451     static int drive_serial = 1;
2452     IDEState *s = &bus->ifs[unit];
2453 
2454     s->bus = bus;
2455     s->unit = unit;
2456     s->drive_serial = drive_serial++;
2457     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2458     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS * 512 + 4;
2459     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2460     memset(s->io_buffer, 0, s->io_buffer_total_len);
2461 
2462     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2463     memset(s->smart_selftest_data, 0, 512);
2464 
2465     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2466                                            ide_sector_write_timer_cb, s);
2467 }
2468 
2469 static int ide_nop_int(IDEDMA *dma, int x)
2470 {
2471     return 0;
2472 }
2473 
2474 static void ide_nop(IDEDMA *dma)
2475 {
2476 }
2477 
2478 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2479 {
2480     return 0;
2481 }
2482 
2483 static const IDEDMAOps ide_dma_nop_ops = {
2484     .prepare_buf    = ide_nop_int32,
2485     .restart_dma    = ide_nop,
2486     .rw_buf         = ide_nop_int,
2487 };
2488 
2489 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2490 {
2491     s->unit = s->bus->retry_unit;
2492     ide_set_sector(s, s->bus->retry_sector_num);
2493     s->nsector = s->bus->retry_nsector;
2494     s->bus->dma->ops->restart_dma(s->bus->dma);
2495     s->io_buffer_size = 0;
2496     s->dma_cmd = dma_cmd;
2497     ide_start_dma(s, ide_dma_cb);
2498 }
2499 
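/*
 * Bottom half scheduled when the VM resumes with a request pending in
 * bus->error_status (rerror/werror=stop policies, migration).  It
 * decodes the saved IDE_RETRY_* bits and resubmits the interrupted
 * DMA, PIO or flush operation.
 */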
2500 static void ide_restart_bh(void *opaque)
2501 {
2502     IDEBus *bus = opaque;
2503     IDEState *s;
2504     bool is_read;
2505     int error_status;
2506 
2507     qemu_bh_delete(bus->bh);
2508     bus->bh = NULL;
2509 
2510     error_status = bus->error_status;
2511     if (bus->error_status == 0) {
2512         return;
2513     }
2514 
2515     s = idebus_active_if(bus);
2516     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2517 
2518     /* The error status must be cleared before resubmitting the request: The
2519      * request may fail again, and this case can only be distinguished if the
2520      * called function can set a new error status. */
2521     bus->error_status = 0;
2522 
2523     /* The HBA has generically asked to be kicked on retry */
2524     if (error_status & IDE_RETRY_HBA) {
2525         if (s->bus->dma->ops->restart) {
2526             s->bus->dma->ops->restart(s->bus->dma);
2527         }
2528     }
2529 
2530     if (error_status & IDE_RETRY_DMA) {
2531         if (error_status & IDE_RETRY_TRIM) {
2532             ide_restart_dma(s, IDE_DMA_TRIM);
2533         } else {
2534             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2535         }
2536     } else if (error_status & IDE_RETRY_PIO) {
2537         if (is_read) {
2538             ide_sector_read(s);
2539         } else {
2540             ide_sector_write(s);
2541         }
2542     } else if (error_status & IDE_RETRY_FLUSH) {
2543         ide_flush_cache(s);
2544     } else {
2545         /*
2546          * We've not got any bits to tell us about ATAPI - but
2547          * we do have the end_transfer_func that tells us what
2548          * we're trying to do.
2549          */
2550         if (s->end_transfer_func == ide_atapi_cmd) {
2551             ide_atapi_dma_restart(s);
2552         }
2553     }
2554 }
2555 
2556 static void ide_restart_cb(void *opaque, int running, RunState state)
2557 {
2558     IDEBus *bus = opaque;
2559 
2560     if (!running)
2561         return;
2562 
2563     if (!bus->bh) {
2564         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2565         qemu_bh_schedule(bus->bh);
2566     }
2567 }
2568 
2569 void ide_register_restart_cb(IDEBus *bus)
2570 {
2571     if (bus->dma->ops->restart_dma) {
2572         qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2573     }
2574 }
2575 
2576 static IDEDMA ide_dma_nop = {
2577     .ops = &ide_dma_nop_ops,
2578     .aiocb = NULL,
2579 };
2580 
2581 void ide_init2(IDEBus *bus, qemu_irq irq)
2582 {
2583     int i;
2584 
2585     for(i = 0; i < 2; i++) {
2586         ide_init1(bus, i);
2587         ide_reset(&bus->ifs[i]);
2588     }
2589     bus->irq = irq;
2590     bus->dma = &ide_dma_nop;
2591 }
2592 
2593 static const MemoryRegionPortio ide_portio_list[] = {
2594     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2595     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2596     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2597     PORTIO_END_OF_LIST(),
2598 };
2599 
2600 static const MemoryRegionPortio ide_portio2_list[] = {
2601     { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2602     PORTIO_END_OF_LIST(),
2603 };
2604 
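/*
 * Register the legacy command block (8 ports) and, optionally, the
 * control block port with the ISA bus.  A typical caller sets up the
 * primary channel roughly like this (illustrative sketch only; the
 * helper names and the legacy 0x1f0/0x3f6/IRQ14 resources are the
 * conventional ones, not taken from this file):
 *
 *     ide_init2(bus, isa_get_irq(isadev, 14));
 *     ide_init_ioport(bus, isadev, 0x1f0, 0x3f6);
 */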
2605 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2606 {
2607     /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2608        bridge has been set up properly to always register with ISA.  */
2609     isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
2610 
2611     if (iobase2) {
2612         isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
2613     }
2614 }
2615 
2616 static bool is_identify_set(void *opaque, int version_id)
2617 {
2618     IDEState *s = opaque;
2619 
2620     return s->identify_set != 0;
2621 }
2622 
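/*
 * end_transfer_func is a function pointer and therefore cannot be
 * migrated directly; it is translated to/from an index into this table
 * by ide_drive_pio_pre_save()/ide_drive_pio_post_load().
 */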
2623 static EndTransferFunc *transfer_end_table[] = {
2624         ide_sector_read,
2625         ide_sector_write,
2626         ide_transfer_stop,
2627         ide_atapi_cmd_reply_end,
2628         ide_atapi_cmd,
2629         ide_dummy_transfer_stop,
2630 };
2631 
2632 static int transfer_end_table_idx(EndTransferFunc *fn)
2633 {
2634     int i;
2635 
2636     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2637         if (transfer_end_table[i] == fn)
2638             return i;
2639 
2640     return -1;
2641 }
2642 
2643 static int ide_drive_post_load(void *opaque, int version_id)
2644 {
2645     IDEState *s = opaque;
2646 
2647     if (s->blk && s->identify_set) {
2648         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2649     }
2650     return 0;
2651 }
2652 
2653 static int ide_drive_pio_post_load(void *opaque, int version_id)
2654 {
2655     IDEState *s = opaque;
2656 
2657     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2658         return -EINVAL;
2659     }
2660     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2661     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2662     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2663     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2664 
2665     return 0;
2666 }
2667 
2668 static void ide_drive_pio_pre_save(void *opaque)
2669 {
2670     IDEState *s = opaque;
2671     int idx;
2672 
2673     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2674     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2675 
2676     idx = transfer_end_table_idx(s->end_transfer_func);
2677     if (idx == -1) {
2678         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2679                         __func__);
2680         s->end_transfer_fn_idx = 2;
2681     } else {
2682         s->end_transfer_fn_idx = idx;
2683     }
2684 }
2685 
2686 static bool ide_drive_pio_state_needed(void *opaque)
2687 {
2688     IDEState *s = opaque;
2689 
2690     return ((s->status & DRQ_STAT) != 0)
2691         || (s->bus->error_status & IDE_RETRY_PIO);
2692 }
2693 
2694 static bool ide_tray_state_needed(void *opaque)
2695 {
2696     IDEState *s = opaque;
2697 
2698     return s->tray_open || s->tray_locked;
2699 }
2700 
2701 static bool ide_atapi_gesn_needed(void *opaque)
2702 {
2703     IDEState *s = opaque;
2704 
2705     return s->events.new_media || s->events.eject_request;
2706 }
2707 
2708 static bool ide_error_needed(void *opaque)
2709 {
2710     IDEBus *bus = opaque;
2711 
2712     return (bus->error_status != 0);
2713 }
2714 
2715 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2716 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2717     .name = "ide_drive/atapi/gesn_state",
2718     .version_id = 1,
2719     .minimum_version_id = 1,
2720     .needed = ide_atapi_gesn_needed,
2721     .fields = (VMStateField[]) {
2722         VMSTATE_BOOL(events.new_media, IDEState),
2723         VMSTATE_BOOL(events.eject_request, IDEState),
2724         VMSTATE_END_OF_LIST()
2725     }
2726 };
2727 
2728 static const VMStateDescription vmstate_ide_tray_state = {
2729     .name = "ide_drive/tray_state",
2730     .version_id = 1,
2731     .minimum_version_id = 1,
2732     .needed = ide_tray_state_needed,
2733     .fields = (VMStateField[]) {
2734         VMSTATE_BOOL(tray_open, IDEState),
2735         VMSTATE_BOOL(tray_locked, IDEState),
2736         VMSTATE_END_OF_LIST()
2737     }
2738 };
2739 
2740 static const VMStateDescription vmstate_ide_drive_pio_state = {
2741     .name = "ide_drive/pio_state",
2742     .version_id = 1,
2743     .minimum_version_id = 1,
2744     .pre_save = ide_drive_pio_pre_save,
2745     .post_load = ide_drive_pio_post_load,
2746     .needed = ide_drive_pio_state_needed,
2747     .fields = (VMStateField[]) {
2748         VMSTATE_INT32(req_nb_sectors, IDEState),
2749         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2750                              vmstate_info_uint8, uint8_t),
2751         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2752         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2753         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2754         VMSTATE_INT32(elementary_transfer_size, IDEState),
2755         VMSTATE_INT32(packet_transfer_size, IDEState),
2756         VMSTATE_END_OF_LIST()
2757     }
2758 };
2759 
2760 const VMStateDescription vmstate_ide_drive = {
2761     .name = "ide_drive",
2762     .version_id = 3,
2763     .minimum_version_id = 0,
2764     .post_load = ide_drive_post_load,
2765     .fields = (VMStateField[]) {
2766         VMSTATE_INT32(mult_sectors, IDEState),
2767         VMSTATE_INT32(identify_set, IDEState),
2768         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2769         VMSTATE_UINT8(feature, IDEState),
2770         VMSTATE_UINT8(error, IDEState),
2771         VMSTATE_UINT32(nsector, IDEState),
2772         VMSTATE_UINT8(sector, IDEState),
2773         VMSTATE_UINT8(lcyl, IDEState),
2774         VMSTATE_UINT8(hcyl, IDEState),
2775         VMSTATE_UINT8(hob_feature, IDEState),
2776         VMSTATE_UINT8(hob_sector, IDEState),
2777         VMSTATE_UINT8(hob_nsector, IDEState),
2778         VMSTATE_UINT8(hob_lcyl, IDEState),
2779         VMSTATE_UINT8(hob_hcyl, IDEState),
2780         VMSTATE_UINT8(select, IDEState),
2781         VMSTATE_UINT8(status, IDEState),
2782         VMSTATE_UINT8(lba48, IDEState),
2783         VMSTATE_UINT8(sense_key, IDEState),
2784         VMSTATE_UINT8(asc, IDEState),
2785         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2786         VMSTATE_END_OF_LIST()
2787     },
2788     .subsections = (const VMStateDescription*[]) {
2789         &vmstate_ide_drive_pio_state,
2790         &vmstate_ide_tray_state,
2791         &vmstate_ide_atapi_gesn_state,
2792         NULL
2793     }
2794 };
2795 
2796 static const VMStateDescription vmstate_ide_error_status = {
2797     .name = "ide_bus/error",
2798     .version_id = 2,
2799     .minimum_version_id = 1,
2800     .needed = ide_error_needed,
2801     .fields = (VMStateField[]) {
2802         VMSTATE_INT32(error_status, IDEBus),
2803         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2804         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2805         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2806         VMSTATE_END_OF_LIST()
2807     }
2808 };
2809 
2810 const VMStateDescription vmstate_ide_bus = {
2811     .name = "ide_bus",
2812     .version_id = 1,
2813     .minimum_version_id = 1,
2814     .fields = (VMStateField[]) {
2815         VMSTATE_UINT8(cmd, IDEBus),
2816         VMSTATE_UINT8(unit, IDEBus),
2817         VMSTATE_END_OF_LIST()
2818     },
2819     .subsections = (const VMStateDescription*[]) {
2820         &vmstate_ide_error_status,
2821         NULL
2822     }
2823 };
2824 
2825 void ide_drive_get(DriveInfo **hd, int n)
2826 {
2827     int i;
2828     int highest_bus = drive_get_max_bus(IF_IDE) + 1;
2829     int max_devs = drive_get_max_devs(IF_IDE);
2830     int n_buses = max_devs ? (n / max_devs) : n;
2831 
2832     /*
2833      * Note: The number of actual buses available is not known.
2834      * We compute this based on the size of the DriveInfo* array, n.
2835      * If it is less than max_devs * <num_real_buses>,
2836      * we will stop looking for drives prematurely instead of overfilling
2837      * the array.
2838      */
2839 
2840     if (highest_bus > n_buses) {
2841         error_report("Too many IDE buses defined (%d > %d)",
2842                      highest_bus, n_buses);
2843         exit(1);
2844     }
2845 
2846     for (i = 0; i < n; i++) {
2847         hd[i] = drive_get_by_index(IF_IDE, i);
2848     }
2849 }
2850