xref: /qemu/hw/ide/core.c (revision 2c533c54)
1 /*
2  * QEMU IDE disk and CD/DVD-ROM Emulator
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  * Copyright (c) 2006 Openedhand Ltd.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy
8  * of this software and associated documentation files (the "Software"), to deal
9  * in the Software without restriction, including without limitation the rights
10  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11  * copies of the Software, and to permit persons to whom the Software is
12  * furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23  * THE SOFTWARE.
24  */
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/i386/pc.h"
28 #include "hw/pci/pci.h"
29 #include "hw/isa/isa.h"
30 #include "qemu/error-report.h"
31 #include "qemu/timer.h"
32 #include "sysemu/sysemu.h"
33 #include "sysemu/dma.h"
34 #include "hw/block/block.h"
35 #include "sysemu/block-backend.h"
36 #include "qemu/cutils.h"
37 
38 #include "hw/ide/internal.h"
39 
40 /* These values were based on a Seagate ST3500418AS but have been modified
41    to make more sense in QEMU */
42 static const int smart_attributes[][12] = {
43     /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
44     /* raw read error rate*/
45     { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
46     /* spin up */
47     { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
48     /* start stop count */
49     { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
50     /* remapped sectors */
51     { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
52     /* power on hours */
53     { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
54     /* power cycle count */
55     { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
56     /* airflow-temperature-celsius */
57     { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
58 };
59 
60 static void ide_dummy_transfer_stop(IDEState *s);
61 
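/*
 * Copy 'src' into 'str', padding with spaces up to 'len' bytes. The
 * destination index is XORed with 1 so each pair of characters is
 * byte-swapped, which is how ATA IDENTIFY strings are laid out inside
 * their 16-bit words.
 */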
62 static void padstr(char *str, const char *src, int len)
63 {
64     int i, v;
65     for(i = 0; i < len; i++) {
66         if (*src)
67             v = *src++;
68         else
69             v = ' ';
70         str[i^1] = v;
71     }
72 }
73 
74 static void put_le16(uint16_t *p, unsigned int v)
75 {
76     *p = cpu_to_le16(v);
77 }
78 
79 static void ide_identify_size(IDEState *s)
80 {
81     uint16_t *p = (uint16_t *)s->identify_data;
82     put_le16(p + 60, s->nb_sectors);
83     put_le16(p + 61, s->nb_sectors >> 16);
84     put_le16(p + 100, s->nb_sectors);
85     put_le16(p + 101, s->nb_sectors >> 16);
86     put_le16(p + 102, s->nb_sectors >> 32);
87     put_le16(p + 103, s->nb_sectors >> 48);
88 }
89 
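/*
 * Build the ATA IDENTIFY DEVICE response for a hard disk. The 512-byte
 * structure is generated once, cached in s->identify_data, and copied
 * into the PIO buffer on every subsequent IDENTIFY command.
 */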
90 static void ide_identify(IDEState *s)
91 {
92     uint16_t *p;
93     unsigned int oldsize;
94     IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
95 
96     p = (uint16_t *)s->identify_data;
97     if (s->identify_set) {
98         goto fill_buffer;
99     }
100     memset(p, 0, sizeof(s->identify_data));
101 
102     put_le16(p + 0, 0x0040);
103     put_le16(p + 1, s->cylinders);
104     put_le16(p + 3, s->heads);
105     put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
106     put_le16(p + 5, 512); /* XXX: retired, remove ? */
107     put_le16(p + 6, s->sectors);
108     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
109     put_le16(p + 20, 3); /* XXX: retired, remove ? */
110     put_le16(p + 21, 512); /* cache size in sectors */
111     put_le16(p + 22, 4); /* ecc bytes */
112     padstr((char *)(p + 23), s->version, 8); /* firmware version */
113     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
114 #if MAX_MULT_SECTORS > 1
115     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
116 #endif
117     put_le16(p + 48, 1); /* dword I/O */
118     put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* IORDY, LBA and DMA supported */
119     put_le16(p + 51, 0x200); /* PIO transfer cycle */
120     put_le16(p + 52, 0x200); /* DMA transfer cycle */
121     put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
122     put_le16(p + 54, s->cylinders);
123     put_le16(p + 55, s->heads);
124     put_le16(p + 56, s->sectors);
125     oldsize = s->cylinders * s->heads * s->sectors;
126     put_le16(p + 57, oldsize);
127     put_le16(p + 58, oldsize >> 16);
128     if (s->mult_sectors)
129         put_le16(p + 59, 0x100 | s->mult_sectors);
130     /* *(p + 60) := nb_sectors       -- see ide_identify_size */
131     /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
132     put_le16(p + 62, 0x07); /* single word dma0-2 supported */
133     put_le16(p + 63, 0x07); /* mdma0-2 supported */
134     put_le16(p + 64, 0x03); /* pio3-4 supported */
135     put_le16(p + 65, 120);
136     put_le16(p + 66, 120);
137     put_le16(p + 67, 120);
138     put_le16(p + 68, 120);
139     if (dev && dev->conf.discard_granularity) {
140         put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
141     }
142 
143     if (s->ncq_queues) {
144         put_le16(p + 75, s->ncq_queues - 1);
145         /* NCQ supported */
146         put_le16(p + 76, (1 << 8));
147     }
148 
149     put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
150     put_le16(p + 81, 0x16); /* conforms to ata5 */
151     /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
152     put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
153     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
154     put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
155     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
156     if (s->wwn) {
157         put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
158     } else {
159         put_le16(p + 84, (1 << 14) | 0);
160     }
161     /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
162     if (blk_enable_write_cache(s->blk)) {
163         put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
164     } else {
165         put_le16(p + 85, (1 << 14) | 1);
166     }
167     /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
168     put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
169     /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
170     if (s->wwn) {
171         put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
172     } else {
173         put_le16(p + 87, (1 << 14) | 0);
174     }
175     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
176     put_le16(p + 93, 1 | (1 << 14) | 0x2000);
177     /* *(p + 100) := nb_sectors       -- see ide_identify_size */
178     /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
179     /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
180     /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
181 
182     if (dev && dev->conf.physical_block_size)
183         put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
184     if (s->wwn) {
185         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
186         put_le16(p + 108, s->wwn >> 48);
187         put_le16(p + 109, s->wwn >> 32);
188         put_le16(p + 110, s->wwn >> 16);
189         put_le16(p + 111, s->wwn);
190     }
191     if (dev && dev->conf.discard_granularity) {
192         put_le16(p + 169, 1); /* TRIM support */
193     }
194 
195     ide_identify_size(s);
196     s->identify_set = 1;
197 
198 fill_buffer:
199     memcpy(s->io_buffer, p, sizeof(s->identify_data));
200 }
201 
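/*
 * Build the IDENTIFY PACKET DEVICE response for an ATAPI CD/DVD-ROM.
 * Like ide_identify(), the result is cached in s->identify_data and
 * only regenerated when identify_set has been cleared.
 */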
202 static void ide_atapi_identify(IDEState *s)
203 {
204     uint16_t *p;
205 
206     p = (uint16_t *)s->identify_data;
207     if (s->identify_set) {
208         goto fill_buffer;
209     }
210     memset(p, 0, sizeof(s->identify_data));
211 
212     /* Removable CDROM, 50us response, 12 byte packets */
213     put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
214     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
215     put_le16(p + 20, 3); /* buffer type */
216     put_le16(p + 21, 512); /* cache size in sectors */
217     put_le16(p + 22, 4); /* ecc bytes */
218     padstr((char *)(p + 23), s->version, 8); /* firmware version */
219     padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
220     put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
221 #ifdef USE_DMA_CDROM
222     put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
223     put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
224     put_le16(p + 62, 7);  /* single word dma0-2 supported */
225     put_le16(p + 63, 7);  /* mdma0-2 supported */
226 #else
227     put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
228     put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
229     put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
230 #endif
231     put_le16(p + 64, 3); /* pio3-4 supported */
232     put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
233     put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
234     put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
235     put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
236 
237     put_le16(p + 71, 30); /* in ns */
238     put_le16(p + 72, 30); /* in ns */
239 
240     if (s->ncq_queues) {
241         put_le16(p + 75, s->ncq_queues - 1);
242         /* NCQ supported */
243         put_le16(p + 76, (1 << 8));
244     }
245 
246     put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
247     if (s->wwn) {
248         put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
249         put_le16(p + 87, (1 << 8)); /* WWN enabled */
250     }
251 
252 #ifdef USE_DMA_CDROM
253     put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
254 #endif
255 
256     if (s->wwn) {
257         /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
258         put_le16(p + 108, s->wwn >> 48);
259         put_le16(p + 109, s->wwn >> 32);
260         put_le16(p + 110, s->wwn >> 16);
261         put_le16(p + 111, s->wwn);
262     }
263 
264     s->identify_set = 1;
265 
266 fill_buffer:
267     memcpy(s->io_buffer, p, sizeof(s->identify_data));
268 }
269 
270 static void ide_cfata_identify_size(IDEState *s)
271 {
272     uint16_t *p = (uint16_t *)s->identify_data;
273     put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
274     put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
275     put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
276     put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
277 }
278 
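/*
 * Build the IDENTIFY response for a CompactFlash (CF-ATA) card, which
 * uses the 0x848a CF Storage Card signature and a slightly different
 * set of words than a regular ATA disk.
 */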
279 static void ide_cfata_identify(IDEState *s)
280 {
281     uint16_t *p;
282     uint32_t cur_sec;
283 
284     p = (uint16_t *)s->identify_data;
285     if (s->identify_set) {
286         goto fill_buffer;
287     }
288     memset(p, 0, sizeof(s->identify_data));
289 
290     cur_sec = s->cylinders * s->heads * s->sectors;
291 
292     put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
293     put_le16(p + 1, s->cylinders);		/* Default cylinders */
294     put_le16(p + 3, s->heads);			/* Default heads */
295     put_le16(p + 6, s->sectors);		/* Default sectors per track */
296     /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
297     /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
298     padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
299     put_le16(p + 22, 0x0004);			/* ECC bytes */
300     padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
301     padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
302 #if MAX_MULT_SECTORS > 1
303     put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
304 #else
305     put_le16(p + 47, 0x0000);
306 #endif
307     put_le16(p + 49, 0x0f00);			/* Capabilities */
308     put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
309     put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
310     put_le16(p + 53, 0x0003);			/* Translation params valid */
311     put_le16(p + 54, s->cylinders);		/* Current cylinders */
312     put_le16(p + 55, s->heads);			/* Current heads */
313     put_le16(p + 56, s->sectors);		/* Current sectors */
314     put_le16(p + 57, cur_sec);			/* Current capacity */
315     put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
316     if (s->mult_sectors)			/* Multiple sector setting */
317         put_le16(p + 59, 0x100 | s->mult_sectors);
318     /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
319     /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
320     put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
321     put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
322     put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
323     put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
324     put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
325     put_le16(p + 82, 0x400c);			/* Command Set supported */
326     put_le16(p + 83, 0x7068);			/* Command Set supported */
327     put_le16(p + 84, 0x4000);			/* Features supported */
328     put_le16(p + 85, 0x000c);			/* Command Set enabled */
329     put_le16(p + 86, 0x7044);			/* Command Set enabled */
330     put_le16(p + 87, 0x4000);			/* Features enabled */
331     put_le16(p + 91, 0x4060);			/* Current APM level */
332     put_le16(p + 129, 0x0002);			/* Current features option */
333     put_le16(p + 130, 0x0005);			/* Reassigned sectors */
334     put_le16(p + 131, 0x0001);			/* Initial power mode */
335     put_le16(p + 132, 0x0000);			/* User signature */
336     put_le16(p + 160, 0x8100);			/* Power requirement */
337     put_le16(p + 161, 0x8001);			/* CF command set */
338 
339     ide_cfata_identify_size(s);
340     s->identify_set = 1;
341 
342 fill_buffer:
343     memcpy(s->io_buffer, p, sizeof(s->identify_data));
344 }
345 
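/*
 * Place the ATA/ATAPI device signature in the task file registers:
 * packet (CD) devices report 0xeb14 in the cylinder registers, present
 * disks report 0x0000, and an empty slot reports 0xffff.
 */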
346 static void ide_set_signature(IDEState *s)
347 {
348     s->select &= 0xf0; /* clear head */
349     /* put signature */
350     s->nsector = 1;
351     s->sector = 1;
352     if (s->drive_kind == IDE_CD) {
353         s->lcyl = 0x14;
354         s->hcyl = 0xeb;
355     } else if (s->blk) {
356         s->lcyl = 0;
357         s->hcyl = 0;
358     } else {
359         s->lcyl = 0xff;
360         s->hcyl = 0xff;
361     }
362 }
363 
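/*
 * State tracked while a DATA SET MANAGEMENT (TRIM) request is being
 * broken up into individual discard operations; 'i' and 'j' index the
 * current entry and iovec of the guest-supplied TRIM range list.
 */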
364 typedef struct TrimAIOCB {
365     BlockAIOCB common;
366     BlockBackend *blk;
367     QEMUBH *bh;
368     int ret;
369     QEMUIOVector *qiov;
370     BlockAIOCB *aiocb;
371     int i, j;
372 } TrimAIOCB;
373 
374 static void trim_aio_cancel(BlockAIOCB *acb)
375 {
376     TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
377 
378     /* Exit the loop so ide_issue_trim_cb will not continue  */
379     iocb->j = iocb->qiov->niov - 1;
380     iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
381 
382     iocb->ret = -ECANCELED;
383 
384     if (iocb->aiocb) {
385         blk_aio_cancel_async(iocb->aiocb);
386         iocb->aiocb = NULL;
387     }
388 }
389 
390 static const AIOCBInfo trim_aiocb_info = {
391     .aiocb_size         = sizeof(TrimAIOCB),
392     .cancel_async       = trim_aio_cancel,
393 };
394 
395 static void ide_trim_bh_cb(void *opaque)
396 {
397     TrimAIOCB *iocb = opaque;
398 
399     iocb->common.cb(iocb->common.opaque, iocb->ret);
400 
401     qemu_bh_delete(iocb->bh);
402     iocb->bh = NULL;
403     qemu_aio_unref(iocb);
404 }
405 
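/*
 * Completion callback for one discard: scan the TRIM payload for the
 * next non-empty 8-byte entry (48-bit starting LBA plus 16-bit sector
 * count) and submit it via blk_aio_pdiscard(), rescheduling this
 * callback until the whole range list is consumed or an error occurs.
 */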
406 static void ide_issue_trim_cb(void *opaque, int ret)
407 {
408     TrimAIOCB *iocb = opaque;
409     if (ret >= 0) {
410         while (iocb->j < iocb->qiov->niov) {
411             int j = iocb->j;
412             while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
413                 int i = iocb->i;
414                 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
415 
416                 /* 6-byte LBA + 2-byte range per entry */
417                 uint64_t entry = le64_to_cpu(buffer[i]);
418                 uint64_t sector = entry & 0x0000ffffffffffffULL;
419                 uint16_t count = entry >> 48;
420 
421                 if (count == 0) {
422                     continue;
423                 }
424 
425                 /* Got an entry! Submit and exit.  */
426                 iocb->aiocb = blk_aio_pdiscard(iocb->blk,
427                                                sector << BDRV_SECTOR_BITS,
428                                                count << BDRV_SECTOR_BITS,
429                                                ide_issue_trim_cb, opaque);
430                 return;
431             }
432 
433             iocb->j++;
434             iocb->i = -1;
435         }
436     } else {
437         iocb->ret = ret;
438     }
439 
440     iocb->aiocb = NULL;
441     if (iocb->bh) {
442         qemu_bh_schedule(iocb->bh);
443     }
444 }
445 
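/*
 * Entry point used by the DMA layer for IDE_DMA_TRIM: allocate the
 * TrimAIOCB bookkeeping structure and kick off the first pass over the
 * range list via ide_issue_trim_cb().
 */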
446 BlockAIOCB *ide_issue_trim(
447         int64_t offset, QEMUIOVector *qiov,
448         BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
449 {
450     BlockBackend *blk = opaque;
451     TrimAIOCB *iocb;
452 
453     iocb = blk_aio_get(&trim_aiocb_info, blk, cb, cb_opaque);
454     iocb->blk = blk;
455     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
456     iocb->ret = 0;
457     iocb->qiov = qiov;
458     iocb->i = -1;
459     iocb->j = 0;
460     ide_issue_trim_cb(iocb, 0);
461     return &iocb->common;
462 }
463 
464 void ide_abort_command(IDEState *s)
465 {
466     ide_transfer_stop(s);
467     s->status = READY_STAT | ERR_STAT;
468     s->error = ABRT_ERR;
469 }
470 
471 static void ide_set_retry(IDEState *s)
472 {
473     s->bus->retry_unit = s->unit;
474     s->bus->retry_sector_num = ide_get_sector(s);
475     s->bus->retry_nsector = s->nsector;
476 }
477 
478 static void ide_clear_retry(IDEState *s)
479 {
480     s->bus->retry_unit = -1;
481     s->bus->retry_sector_num = 0;
482     s->bus->retry_nsector = 0;
483 }
484 
485 /* prepare a data transfer and register the function to call when it completes */
486 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
487                         EndTransferFunc *end_transfer_func)
488 {
489     s->end_transfer_func = end_transfer_func;
490     s->data_ptr = buf;
491     s->data_end = buf + size;
492     ide_set_retry(s);
493     if (!(s->status & ERR_STAT)) {
494         s->status |= DRQ_STAT;
495     }
496     if (s->bus->dma->ops->start_transfer) {
497         s->bus->dma->ops->start_transfer(s->bus->dma);
498     }
499 }
500 
501 static void ide_cmd_done(IDEState *s)
502 {
503     if (s->bus->dma->ops->cmd_done) {
504         s->bus->dma->ops->cmd_done(s->bus->dma);
505     }
506 }
507 
508 static void ide_transfer_halt(IDEState *s,
509                               void(*end_transfer_func)(IDEState *),
510                               bool notify)
511 {
512     s->end_transfer_func = end_transfer_func;
513     s->data_ptr = s->io_buffer;
514     s->data_end = s->io_buffer;
515     s->status &= ~DRQ_STAT;
516     if (notify) {
517         ide_cmd_done(s);
518     }
519 }
520 
521 void ide_transfer_stop(IDEState *s)
522 {
523     ide_transfer_halt(s, ide_transfer_stop, true);
524 }
525 
526 static void ide_transfer_cancel(IDEState *s)
527 {
528     ide_transfer_halt(s, ide_transfer_cancel, false);
529 }
530 
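/*
 * Decode the current position from the task file registers: 48-bit LBA
 * when lba48 is in effect, 28-bit LBA when bit 6 of the select register
 * is set, and cylinder/head/sector translation otherwise.
 */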
531 int64_t ide_get_sector(IDEState *s)
532 {
533     int64_t sector_num;
534     if (s->select & 0x40) {
535         /* lba */
536 	if (!s->lba48) {
537 	    sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
538 		(s->lcyl << 8) | s->sector;
539 	} else {
540 	    sector_num = ((int64_t)s->hob_hcyl << 40) |
541 		((int64_t) s->hob_lcyl << 32) |
542 		((int64_t) s->hob_sector << 24) |
543 		((int64_t) s->hcyl << 16) |
544 		((int64_t) s->lcyl << 8) | s->sector;
545 	}
546     } else {
547         sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
548             (s->select & 0x0f) * s->sectors + (s->sector - 1);
549     }
550     return sector_num;
551 }
552 
553 void ide_set_sector(IDEState *s, int64_t sector_num)
554 {
555     unsigned int cyl, r;
556     if (s->select & 0x40) {
557 	if (!s->lba48) {
558             s->select = (s->select & 0xf0) | (sector_num >> 24);
559             s->hcyl = (sector_num >> 16);
560             s->lcyl = (sector_num >> 8);
561             s->sector = (sector_num);
562 	} else {
563 	    s->sector = sector_num;
564 	    s->lcyl = sector_num >> 8;
565 	    s->hcyl = sector_num >> 16;
566 	    s->hob_sector = sector_num >> 24;
567 	    s->hob_lcyl = sector_num >> 32;
568 	    s->hob_hcyl = sector_num >> 40;
569 	}
570     } else {
571         cyl = sector_num / (s->heads * s->sectors);
572         r = sector_num % (s->heads * s->sectors);
573         s->hcyl = cyl >> 8;
574         s->lcyl = cyl;
575         s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
576         s->sector = (r % s->sectors) + 1;
577     }
578 }
579 
580 static void ide_rw_error(IDEState *s) {
581     ide_abort_command(s);
582     ide_set_irq(s->bus);
583 }
584 
585 static bool ide_sect_range_ok(IDEState *s,
586                               uint64_t sector, uint64_t nb_sectors)
587 {
588     uint64_t total_sectors;
589 
590     blk_get_geometry(s->blk, &total_sectors);
591     if (sector > total_sectors || nb_sectors > total_sectors - sector) {
592         return false;
593     }
594     return true;
595 }
596 
597 static void ide_buffered_readv_cb(void *opaque, int ret)
598 {
599     IDEBufferedRequest *req = opaque;
600     if (!req->orphaned) {
601         if (!ret) {
602             qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
603                                 req->original_qiov->size);
604         }
605         req->original_cb(req->original_opaque, ret);
606     }
607     QLIST_REMOVE(req, list);
608     qemu_vfree(req->iov.iov_base);
609     g_free(req);
610 }
611 
612 #define MAX_BUFFERED_REQS 16
613 
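/*
 * Issue a read through a private bounce buffer instead of directly into
 * the guest-visible iovec. If the request is later orphaned by
 * ide_cancel_dma_sync(), its completion simply frees the bounce buffer
 * without touching guest memory. At most MAX_BUFFERED_REQS requests may
 * be in flight at once.
 */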
614 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
615                                QEMUIOVector *iov, int nb_sectors,
616                                BlockCompletionFunc *cb, void *opaque)
617 {
618     BlockAIOCB *aioreq;
619     IDEBufferedRequest *req;
620     int c = 0;
621 
622     QLIST_FOREACH(req, &s->buffered_requests, list) {
623         c++;
624     }
625     if (c > MAX_BUFFERED_REQS) {
626         return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
627     }
628 
629     req = g_new0(IDEBufferedRequest, 1);
630     req->original_qiov = iov;
631     req->original_cb = cb;
632     req->original_opaque = opaque;
633     req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
634     req->iov.iov_len = iov->size;
635     qemu_iovec_init_external(&req->qiov, &req->iov, 1);
636 
637     aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
638                             &req->qiov, 0, ide_buffered_readv_cb, req);
639 
640     QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
641     return aioreq;
642 }
643 
644 /**
645  * Cancel all pending DMA requests.
646  * Any buffered DMA requests are instantly canceled,
647  * but any pending unbuffered DMA requests must be waited on.
648  */
649 void ide_cancel_dma_sync(IDEState *s)
650 {
651     IDEBufferedRequest *req;
652 
653     /* First invoke the callbacks of all buffered requests
654      * and flag those requests as orphaned. Ideally there
655      * are no unbuffered (Scatter Gather DMA Requests or
656      * write requests) pending and we can avoid draining. */
657     QLIST_FOREACH(req, &s->buffered_requests, list) {
658         if (!req->orphaned) {
659 #ifdef DEBUG_IDE
660             printf("%s: invoking cb %p of buffered request %p with"
661                    " -ECANCELED\n", __func__, req->original_cb, req);
662 #endif
663             req->original_cb(req->original_opaque, -ECANCELED);
664         }
665         req->orphaned = true;
666     }
667 
668     /*
669      * We can't cancel Scatter Gather DMA in the middle of the
670      * operation or a partial (not full) DMA transfer would reach
671      * the storage so we wait for completion instead (we beahve
672      * like if the DMA was completed by the time the guest trying
673      * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
674      * set).
675      *
676      * In the future we'll be able to safely cancel the I/O if the
677      * whole DMA operation will be submitted to disk with a single
678      * aio operation with preadv/pwritev.
679      */
680     if (s->bus->dma->aiocb) {
681 #ifdef DEBUG_IDE
682         printf("%s: draining all remaining requests\n", __func__);
683 #endif
684         blk_drain(s->blk);
685         assert(s->bus->dma->aiocb == NULL);
686     }
687 }
688 
689 static void ide_sector_read(IDEState *s);
690 
691 static void ide_sector_read_cb(void *opaque, int ret)
692 {
693     IDEState *s = opaque;
694     int n;
695 
696     s->pio_aiocb = NULL;
697     s->status &= ~BUSY_STAT;
698 
699     if (ret == -ECANCELED) {
700         return;
701     }
702     if (ret != 0) {
703         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
704                                 IDE_RETRY_READ)) {
705             return;
706         }
707     }
708 
709     block_acct_done(blk_get_stats(s->blk), &s->acct);
710 
711     n = s->nsector;
712     if (n > s->req_nb_sectors) {
713         n = s->req_nb_sectors;
714     }
715 
716     ide_set_sector(s, ide_get_sector(s) + n);
717     s->nsector -= n;
718     /* Allow the guest to read the io_buffer */
719     ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
720     ide_set_irq(s->bus);
721 }
722 
723 static void ide_sector_read(IDEState *s)
724 {
725     int64_t sector_num;
726     int n;
727 
728     s->status = READY_STAT | SEEK_STAT;
729     s->error = 0; /* not needed by IDE spec, but needed by Windows */
730     sector_num = ide_get_sector(s);
731     n = s->nsector;
732 
733     if (n == 0) {
734         ide_transfer_stop(s);
735         return;
736     }
737 
738     s->status |= BUSY_STAT;
739 
740     if (n > s->req_nb_sectors) {
741         n = s->req_nb_sectors;
742     }
743 
744 #if defined(DEBUG_IDE)
745     printf("sector=%" PRId64 "\n", sector_num);
746 #endif
747 
748     if (!ide_sect_range_ok(s, sector_num, n)) {
749         ide_rw_error(s);
750         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
751         return;
752     }
753 
754     s->iov.iov_base = s->io_buffer;
755     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
756     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
757 
758     block_acct_start(blk_get_stats(s->blk), &s->acct,
759                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
760     s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
761                                       ide_sector_read_cb, s);
762 }
763 
764 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
765 {
766     if (s->bus->dma->ops->commit_buf) {
767         s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
768     }
769     s->io_buffer_offset += tx_bytes;
770     qemu_sglist_destroy(&s->sg);
771 }
772 
773 void ide_set_inactive(IDEState *s, bool more)
774 {
775     s->bus->dma->aiocb = NULL;
776     ide_clear_retry(s);
777     if (s->bus->dma->ops->set_inactive) {
778         s->bus->dma->ops->set_inactive(s->bus->dma, more);
779     }
780     ide_cmd_done(s);
781 }
782 
783 void ide_dma_error(IDEState *s)
784 {
785     dma_buf_commit(s, 0);
786     ide_abort_command(s);
787     ide_set_inactive(s, false);
788     ide_set_irq(s->bus);
789 }
790 
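/*
 * Apply the configured block error policy to a failed request. Returns
 * nonzero when the error was handled (reported to the guest, or the VM
 * was stopped for a later retry), zero when the error should be ignored
 * and the caller may continue.
 */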
791 int ide_handle_rw_error(IDEState *s, int error, int op)
792 {
793     bool is_read = (op & IDE_RETRY_READ) != 0;
794     BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
795 
796     if (action == BLOCK_ERROR_ACTION_STOP) {
797         assert(s->bus->retry_unit == s->unit);
798         s->bus->error_status = op;
799     } else if (action == BLOCK_ERROR_ACTION_REPORT) {
800         block_acct_failed(blk_get_stats(s->blk), &s->acct);
801         if (IS_IDE_RETRY_DMA(op)) {
802             ide_dma_error(s);
803         } else if (IS_IDE_RETRY_ATAPI(op)) {
804             ide_atapi_io_error(s, -error);
805         } else {
806             ide_rw_error(s);
807         }
808     }
809     blk_error_action(s->blk, action, is_read, error);
810     return action != BLOCK_ERROR_ACTION_IGNORE;
811 }
812 
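/*
 * DMA completion callback: commit the bytes described by the scatter/
 * gather list, advance the current sector and remaining count, and
 * either finish the command (nsector == 0), stop early when the PRDs
 * are exhausted, or map the next PRD run and resubmit the transfer.
 */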
813 static void ide_dma_cb(void *opaque, int ret)
814 {
815     IDEState *s = opaque;
816     int n;
817     int64_t sector_num;
818     uint64_t offset;
819     bool stay_active = false;
820 
821     if (ret == -ECANCELED) {
822         return;
823     }
824     if (ret < 0) {
825         if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
826             return;
827         }
828     }
829 
830     n = s->io_buffer_size >> 9;
831     if (n > s->nsector) {
832         /* The PRDs were longer than needed for this request. Shorten them so
833          * we don't get a negative remainder. The Active bit must remain set
834          * after the request completes. */
835         n = s->nsector;
836         stay_active = true;
837     }
838 
839     sector_num = ide_get_sector(s);
840     if (n > 0) {
841         assert(n * 512 == s->sg.size);
842         dma_buf_commit(s, s->sg.size);
843         sector_num += n;
844         ide_set_sector(s, sector_num);
845         s->nsector -= n;
846     }
847 
848     /* end of transfer ? */
849     if (s->nsector == 0) {
850         s->status = READY_STAT | SEEK_STAT;
851         ide_set_irq(s->bus);
852         goto eot;
853     }
854 
855     /* launch next transfer */
856     n = s->nsector;
857     s->io_buffer_index = 0;
858     s->io_buffer_size = n * 512;
859     if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
860         /* The PRDs were too short. Reset the Active bit, but don't raise an
861          * interrupt. */
862         s->status = READY_STAT | SEEK_STAT;
863         dma_buf_commit(s, 0);
864         goto eot;
865     }
866 
867 #ifdef DEBUG_AIO
868     printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, dma_cmd=%d\n",
869            sector_num, n, s->dma_cmd);
870 #endif
871 
872     if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
873         !ide_sect_range_ok(s, sector_num, n)) {
874         ide_dma_error(s);
875         block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
876         return;
877     }
878 
879     offset = sector_num << BDRV_SECTOR_BITS;
880     switch (s->dma_cmd) {
881     case IDE_DMA_READ:
882         s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
883                                           ide_dma_cb, s);
884         break;
885     case IDE_DMA_WRITE:
886         s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
887                                            ide_dma_cb, s);
888         break;
889     case IDE_DMA_TRIM:
890         s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
891                                         &s->sg, offset,
892                                         ide_issue_trim, s->blk, ide_dma_cb, s,
893                                         DMA_DIRECTION_TO_DEVICE);
894         break;
895     default:
896         abort();
897     }
898     return;
899 
900 eot:
901     if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
902         block_acct_done(blk_get_stats(s->blk), &s->acct);
903     }
904     ide_set_inactive(s, stay_active);
905 }
906 
907 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
908 {
909     s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
910     s->io_buffer_size = 0;
911     s->dma_cmd = dma_cmd;
912 
913     switch (dma_cmd) {
914     case IDE_DMA_READ:
915         block_acct_start(blk_get_stats(s->blk), &s->acct,
916                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
917         break;
918     case IDE_DMA_WRITE:
919         block_acct_start(blk_get_stats(s->blk), &s->acct,
920                          s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
921         break;
922     default:
923         break;
924     }
925 
926     ide_start_dma(s, ide_dma_cb);
927 }
928 
929 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
930 {
931     s->io_buffer_index = 0;
932     ide_set_retry(s);
933     if (s->bus->dma->ops->start_dma) {
934         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
935     }
936 }
937 
938 static void ide_sector_write(IDEState *s);
939 
940 static void ide_sector_write_timer_cb(void *opaque)
941 {
942     IDEState *s = opaque;
943     ide_set_irq(s->bus);
944 }
945 
946 static void ide_sector_write_cb(void *opaque, int ret)
947 {
948     IDEState *s = opaque;
949     int n;
950 
951     if (ret == -ECANCELED) {
952         return;
953     }
954 
955     s->pio_aiocb = NULL;
956     s->status &= ~BUSY_STAT;
957 
958     if (ret != 0) {
959         if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
960             return;
961         }
962     }
963 
964     block_acct_done(blk_get_stats(s->blk), &s->acct);
965 
966     n = s->nsector;
967     if (n > s->req_nb_sectors) {
968         n = s->req_nb_sectors;
969     }
970     s->nsector -= n;
971 
972     ide_set_sector(s, ide_get_sector(s) + n);
973     if (s->nsector == 0) {
974         /* no more sectors to write */
975         ide_transfer_stop(s);
976     } else {
977         int n1 = s->nsector;
978         if (n1 > s->req_nb_sectors) {
979             n1 = s->req_nb_sectors;
980         }
981         ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
982                            ide_sector_write);
983     }
984 
985     if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
986         /* It seems there is a bug in the Windows 2000 installer HDD
987            IDE driver which fills the disk with empty logs when the
988            IDE write IRQ comes too early. This hack tries to correct
989            that at the expense of slower write performance. Use this
990            option _only_ to install Windows 2000. You must disable it
991            for normal use. */
992         timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
993                   (NANOSECONDS_PER_SECOND / 1000));
994     } else {
995         ide_set_irq(s->bus);
996     }
997 }
998 
999 static void ide_sector_write(IDEState *s)
1000 {
1001     int64_t sector_num;
1002     int n;
1003 
1004     s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
1005     sector_num = ide_get_sector(s);
1006 #if defined(DEBUG_IDE)
1007     printf("sector=%" PRId64 "\n", sector_num);
1008 #endif
1009     n = s->nsector;
1010     if (n > s->req_nb_sectors) {
1011         n = s->req_nb_sectors;
1012     }
1013 
1014     if (!ide_sect_range_ok(s, sector_num, n)) {
1015         ide_rw_error(s);
1016         block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
1017         return;
1018     }
1019 
1020     s->iov.iov_base = s->io_buffer;
1021     s->iov.iov_len  = n * BDRV_SECTOR_SIZE;
1022     qemu_iovec_init_external(&s->qiov, &s->iov, 1);
1023 
1024     block_acct_start(blk_get_stats(s->blk), &s->acct,
1025                      n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
1026     s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
1027                                    &s->qiov, 0, ide_sector_write_cb, s);
1028 }
1029 
1030 static void ide_flush_cb(void *opaque, int ret)
1031 {
1032     IDEState *s = opaque;
1033 
1034     s->pio_aiocb = NULL;
1035 
1036     if (ret == -ECANCELED) {
1037         return;
1038     }
1039     if (ret < 0) {
1040         /* XXX: What sector number to set here? */
1041         if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
1042             return;
1043         }
1044     }
1045 
1046     if (s->blk) {
1047         block_acct_done(blk_get_stats(s->blk), &s->acct);
1048     }
1049     s->status = READY_STAT | SEEK_STAT;
1050     ide_cmd_done(s);
1051     ide_set_irq(s->bus);
1052 }
1053 
1054 static void ide_flush_cache(IDEState *s)
1055 {
1056     if (s->blk == NULL) {
1057         ide_flush_cb(s, 0);
1058         return;
1059     }
1060 
1061     s->status |= BUSY_STAT;
1062     ide_set_retry(s);
1063     block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1064     s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1065 }
1066 
1067 static void ide_cfata_metadata_inquiry(IDEState *s)
1068 {
1069     uint16_t *p;
1070     uint32_t spd;
1071 
1072     p = (uint16_t *) s->io_buffer;
1073     memset(p, 0, 0x200);
1074     spd = ((s->mdata_size - 1) >> 9) + 1;
1075 
1076     put_le16(p + 0, 0x0001);			/* Data format revision */
1077     put_le16(p + 1, 0x0000);			/* Media property: silicon */
1078     put_le16(p + 2, s->media_changed);		/* Media status */
1079     put_le16(p + 3, s->mdata_size & 0xffff);	/* Capacity in bytes (low) */
1080     put_le16(p + 4, s->mdata_size >> 16);	/* Capacity in bytes (high) */
1081     put_le16(p + 5, spd & 0xffff);		/* Sectors per device (low) */
1082     put_le16(p + 6, spd >> 16);			/* Sectors per device (high) */
1083 }
1084 
1085 static void ide_cfata_metadata_read(IDEState *s)
1086 {
1087     uint16_t *p;
1088 
1089     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1090         s->status = ERR_STAT;
1091         s->error = ABRT_ERR;
1092         return;
1093     }
1094 
1095     p = (uint16_t *) s->io_buffer;
1096     memset(p, 0, 0x200);
1097 
1098     put_le16(p + 0, s->media_changed);		/* Media status */
1099     memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1100                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1101                                     s->nsector << 9), 0x200 - 2));
1102 }
1103 
1104 static void ide_cfata_metadata_write(IDEState *s)
1105 {
1106     if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1107         s->status = ERR_STAT;
1108         s->error = ABRT_ERR;
1109         return;
1110     }
1111 
1112     s->media_changed = 0;
1113 
1114     memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1115                     s->io_buffer + 2,
1116                     MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1117                                     s->nsector << 9), 0x200 - 2));
1118 }
1119 
1120 /* called when the inserted state of the media has changed */
1121 static void ide_cd_change_cb(void *opaque, bool load)
1122 {
1123     IDEState *s = opaque;
1124     uint64_t nb_sectors;
1125 
1126     s->tray_open = !load;
1127     blk_get_geometry(s->blk, &nb_sectors);
1128     s->nb_sectors = nb_sectors;
1129 
1130     /*
1131      * First indicate to the guest that a CD has been removed.  That's
1132      * done on the next command the guest sends us.
1133      *
1134      * Then we set UNIT_ATTENTION, by which the guest will
1135      * detect a new CD in the drive.  See ide_atapi_cmd() for details.
1136      */
1137     s->cdrom_changed = 1;
1138     s->events.new_media = true;
1139     s->events.eject_request = false;
1140     ide_set_irq(s->bus);
1141 }
1142 
1143 static void ide_cd_eject_request_cb(void *opaque, bool force)
1144 {
1145     IDEState *s = opaque;
1146 
1147     s->events.eject_request = true;
1148     if (force) {
1149         s->tray_locked = false;
1150     }
1151     ide_set_irq(s->bus);
1152 }
1153 
1154 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1155 {
1156     s->lba48 = lba48;
1157 
1158     /* Handle the 'magic' 0 nsector count conversion here. To avoid
1159      * fiddling with the rest of the read logic, we just store the
1160      * full sector count in ->nsector and ignore ->hob_nsector from now on.
1161      */
1162     if (!s->lba48) {
1163 	if (!s->nsector)
1164 	    s->nsector = 256;
1165     } else {
1166 	if (!s->nsector && !s->hob_nsector)
1167 	    s->nsector = 65536;
1168 	else {
1169 	    int lo = s->nsector;
1170 	    int hi = s->hob_nsector;
1171 
1172 	    s->nsector = (hi << 8) | lo;
1173 	}
1174     }
1175 }
1176 
1177 static void ide_clear_hob(IDEBus *bus)
1178 {
1179     /* any write clears HOB high bit of device control register */
1180     bus->ifs[0].select &= ~(1 << 7);
1181     bus->ifs[1].select &= ~(1 << 7);
1182 }
1183 
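/*
 * Handle writes to the ATA command block registers (offsets 0-7 of the
 * I/O port range). Register values are latched into both drives on the
 * bus; only a write to the command register (offset 7) actually starts
 * execution on the currently selected drive.
 */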
1184 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1185 {
1186     IDEBus *bus = opaque;
1187 
1188 #ifdef DEBUG_IDE
1189     printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
1190 #endif
1191 
1192     addr &= 7;
1193 
1194     /* ignore writes to command block while busy with previous command */
1195     if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
1196         return;
1197 
1198     switch(addr) {
1199     case 0:
1200         break;
1201     case 1:
1202 	ide_clear_hob(bus);
1203         /* NOTE: data is written to both drives */
1204 	bus->ifs[0].hob_feature = bus->ifs[0].feature;
1205 	bus->ifs[1].hob_feature = bus->ifs[1].feature;
1206         bus->ifs[0].feature = val;
1207         bus->ifs[1].feature = val;
1208         break;
1209     case 2:
1210 	ide_clear_hob(bus);
1211 	bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1212 	bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1213         bus->ifs[0].nsector = val;
1214         bus->ifs[1].nsector = val;
1215         break;
1216     case 3:
1217 	ide_clear_hob(bus);
1218 	bus->ifs[0].hob_sector = bus->ifs[0].sector;
1219 	bus->ifs[1].hob_sector = bus->ifs[1].sector;
1220         bus->ifs[0].sector = val;
1221         bus->ifs[1].sector = val;
1222         break;
1223     case 4:
1224 	ide_clear_hob(bus);
1225 	bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1226 	bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1227         bus->ifs[0].lcyl = val;
1228         bus->ifs[1].lcyl = val;
1229         break;
1230     case 5:
1231 	ide_clear_hob(bus);
1232 	bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1233 	bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1234         bus->ifs[0].hcyl = val;
1235         bus->ifs[1].hcyl = val;
1236         break;
1237     case 6:
1238 	/* FIXME: HOB readback uses bit 7 */
1239         bus->ifs[0].select = (val & ~0x10) | 0xa0;
1240         bus->ifs[1].select = (val | 0x10) | 0xa0;
1241         /* select drive */
1242         bus->unit = (val >> 4) & 1;
1243         break;
1244     default:
1245     case 7:
1246         /* command */
1247         ide_exec_cmd(bus, val);
1248         break;
1249     }
1250 }
1251 
1252 static void ide_reset(IDEState *s)
1253 {
1254 #ifdef DEBUG_IDE
1255     printf("ide: reset\n");
1256 #endif
1257 
1258     if (s->pio_aiocb) {
1259         blk_aio_cancel(s->pio_aiocb);
1260         s->pio_aiocb = NULL;
1261     }
1262 
1263     if (s->drive_kind == IDE_CFATA)
1264         s->mult_sectors = 0;
1265     else
1266         s->mult_sectors = MAX_MULT_SECTORS;
1267     /* ide regs */
1268     s->feature = 0;
1269     s->error = 0;
1270     s->nsector = 0;
1271     s->sector = 0;
1272     s->lcyl = 0;
1273     s->hcyl = 0;
1274 
1275     /* lba48 */
1276     s->hob_feature = 0;
1277     s->hob_sector = 0;
1278     s->hob_nsector = 0;
1279     s->hob_lcyl = 0;
1280     s->hob_hcyl = 0;
1281 
1282     s->select = 0xa0;
1283     s->status = READY_STAT | SEEK_STAT;
1284 
1285     s->lba48 = 0;
1286 
1287     /* ATAPI specific */
1288     s->sense_key = 0;
1289     s->asc = 0;
1290     s->cdrom_changed = 0;
1291     s->packet_transfer_size = 0;
1292     s->elementary_transfer_size = 0;
1293     s->io_buffer_index = 0;
1294     s->cd_sector_size = 0;
1295     s->atapi_dma = 0;
1296     s->tray_locked = 0;
1297     s->tray_open = 0;
1298     /* ATA DMA state */
1299     s->io_buffer_size = 0;
1300     s->req_nb_sectors = 0;
1301 
1302     ide_set_signature(s);
1303     /* init the transfer handler so that 0xffff is returned on data
1304        accesses */
1305     s->end_transfer_func = ide_dummy_transfer_stop;
1306     ide_dummy_transfer_stop(s);
1307     s->media_changed = 0;
1308 }
1309 
1310 static bool cmd_nop(IDEState *s, uint8_t cmd)
1311 {
1312     return true;
1313 }
1314 
1315 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1316 {
1317     /* Halt PIO (in the DRQ phase), then DMA */
1318     ide_transfer_cancel(s);
1319     ide_cancel_dma_sync(s);
1320 
1321     /* Reset any PIO commands, reset signature, etc */
1322     ide_reset(s);
1323 
1324     /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1325      * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1326     s->status = 0x00;
1327 
1328     /* Do not overwrite status register */
1329     return false;
1330 }
1331 
1332 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1333 {
1334     switch (s->feature) {
1335     case DSM_TRIM:
1336         if (s->blk) {
1337             ide_sector_start_dma(s, IDE_DMA_TRIM);
1338             return false;
1339         }
1340         break;
1341     }
1342 
1343     ide_abort_command(s);
1344     return true;
1345 }
1346 
1347 static bool cmd_identify(IDEState *s, uint8_t cmd)
1348 {
1349     if (s->blk && s->drive_kind != IDE_CD) {
1350         if (s->drive_kind != IDE_CFATA) {
1351             ide_identify(s);
1352         } else {
1353             ide_cfata_identify(s);
1354         }
1355         s->status = READY_STAT | SEEK_STAT;
1356         ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1357         ide_set_irq(s->bus);
1358         return false;
1359     } else {
1360         if (s->drive_kind == IDE_CD) {
1361             ide_set_signature(s);
1362         }
1363         ide_abort_command(s);
1364     }
1365 
1366     return true;
1367 }
1368 
1369 static bool cmd_verify(IDEState *s, uint8_t cmd)
1370 {
1371     bool lba48 = (cmd == WIN_VERIFY_EXT);
1372 
1373     /* do sector number check ? */
1374     ide_cmd_lba48_transform(s, lba48);
1375 
1376     return true;
1377 }
1378 
1379 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1380 {
1381     if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1382         /* Disable Read and Write Multiple */
1383         s->mult_sectors = 0;
1384     } else if ((s->nsector & 0xff) != 0 &&
1385         ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1386          (s->nsector & (s->nsector - 1)) != 0)) {
1387         ide_abort_command(s);
1388     } else {
1389         s->mult_sectors = s->nsector & 0xff;
1390     }
1391 
1392     return true;
1393 }
1394 
1395 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1396 {
1397     bool lba48 = (cmd == WIN_MULTREAD_EXT);
1398 
1399     if (!s->blk || !s->mult_sectors) {
1400         ide_abort_command(s);
1401         return true;
1402     }
1403 
1404     ide_cmd_lba48_transform(s, lba48);
1405     s->req_nb_sectors = s->mult_sectors;
1406     ide_sector_read(s);
1407     return false;
1408 }
1409 
1410 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1411 {
1412     bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1413     int n;
1414 
1415     if (!s->blk || !s->mult_sectors) {
1416         ide_abort_command(s);
1417         return true;
1418     }
1419 
1420     ide_cmd_lba48_transform(s, lba48);
1421 
1422     s->req_nb_sectors = s->mult_sectors;
1423     n = MIN(s->nsector, s->req_nb_sectors);
1424 
1425     s->status = SEEK_STAT | READY_STAT;
1426     ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1427 
1428     s->media_changed = 1;
1429 
1430     return false;
1431 }
1432 
1433 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1434 {
1435     bool lba48 = (cmd == WIN_READ_EXT);
1436 
1437     if (s->drive_kind == IDE_CD) {
1438         ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1439         ide_abort_command(s);
1440         return true;
1441     }
1442 
1443     if (!s->blk) {
1444         ide_abort_command(s);
1445         return true;
1446     }
1447 
1448     ide_cmd_lba48_transform(s, lba48);
1449     s->req_nb_sectors = 1;
1450     ide_sector_read(s);
1451 
1452     return false;
1453 }
1454 
1455 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1456 {
1457     bool lba48 = (cmd == WIN_WRITE_EXT);
1458 
1459     if (!s->blk) {
1460         ide_abort_command(s);
1461         return true;
1462     }
1463 
1464     ide_cmd_lba48_transform(s, lba48);
1465 
1466     s->req_nb_sectors = 1;
1467     s->status = SEEK_STAT | READY_STAT;
1468     ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1469 
1470     s->media_changed = 1;
1471 
1472     return false;
1473 }
1474 
1475 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1476 {
1477     bool lba48 = (cmd == WIN_READDMA_EXT);
1478 
1479     if (!s->blk) {
1480         ide_abort_command(s);
1481         return true;
1482     }
1483 
1484     ide_cmd_lba48_transform(s, lba48);
1485     ide_sector_start_dma(s, IDE_DMA_READ);
1486 
1487     return false;
1488 }
1489 
1490 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1491 {
1492     bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1493 
1494     if (!s->blk) {
1495         ide_abort_command(s);
1496         return true;
1497     }
1498 
1499     ide_cmd_lba48_transform(s, lba48);
1500     ide_sector_start_dma(s, IDE_DMA_WRITE);
1501 
1502     s->media_changed = 1;
1503 
1504     return false;
1505 }
1506 
1507 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1508 {
1509     ide_flush_cache(s);
1510     return false;
1511 }
1512 
1513 static bool cmd_seek(IDEState *s, uint8_t cmd)
1514 {
1515     /* XXX: Check that seek is within bounds */
1516     return true;
1517 }
1518 
1519 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1520 {
1521     bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1522 
1523     /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1524     if (s->nb_sectors == 0) {
1525         ide_abort_command(s);
1526         return true;
1527     }
1528 
1529     ide_cmd_lba48_transform(s, lba48);
1530     ide_set_sector(s, s->nb_sectors - 1);
1531 
1532     return true;
1533 }
1534 
1535 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1536 {
1537     s->nsector = 0xff; /* device active or idle */
1538     return true;
1539 }
1540 
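/*
 * SET FEATURES: the subcommand is taken from the Feature register and,
 * for subcommand 0x03 (set transfer mode), the requested mode from the
 * low bits of the Sector Count register. The cached IDENTIFY data is
 * patched so the guest sees the new write-cache/transfer-mode state.
 */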
1541 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1542 {
1543     uint16_t *identify_data;
1544 
1545     if (!s->blk) {
1546         ide_abort_command(s);
1547         return true;
1548     }
1549 
1550     /* XXX: valid for CDROM ? */
1551     switch (s->feature) {
1552     case 0x02: /* write cache enable */
1553         blk_set_enable_write_cache(s->blk, true);
1554         identify_data = (uint16_t *)s->identify_data;
1555         put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1556         return true;
1557     case 0x82: /* write cache disable */
1558         blk_set_enable_write_cache(s->blk, false);
1559         identify_data = (uint16_t *)s->identify_data;
1560         put_le16(identify_data + 85, (1 << 14) | 1);
1561         ide_flush_cache(s);
1562         return false;
1563     case 0xcc: /* reverting to power-on defaults enable */
1564     case 0x66: /* reverting to power-on defaults disable */
1565     case 0xaa: /* read look-ahead enable */
1566     case 0x55: /* read look-ahead disable */
1567     case 0x05: /* set advanced power management mode */
1568     case 0x85: /* disable advanced power management mode */
1569     case 0x69: /* NOP */
1570     case 0x67: /* NOP */
1571     case 0x96: /* NOP */
1572     case 0x9a: /* NOP */
1573     case 0x42: /* enable Automatic Acoustic Mode */
1574     case 0xc2: /* disable Automatic Acoustic Mode */
1575         return true;
1576     case 0x03: /* set transfer mode */
1577         {
1578             uint8_t val = s->nsector & 0x07;
1579             identify_data = (uint16_t *)s->identify_data;
1580 
1581             switch (s->nsector >> 3) {
1582             case 0x00: /* pio default */
1583             case 0x01: /* pio mode */
1584                 put_le16(identify_data + 62, 0x07);
1585                 put_le16(identify_data + 63, 0x07);
1586                 put_le16(identify_data + 88, 0x3f);
1587                 break;
1588             case 0x02: /* single word dma mode */
1589                 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1590                 put_le16(identify_data + 63, 0x07);
1591                 put_le16(identify_data + 88, 0x3f);
1592                 break;
1593             case 0x04: /* mdma mode */
1594                 put_le16(identify_data + 62, 0x07);
1595                 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1596                 put_le16(identify_data + 88, 0x3f);
1597                 break;
1598             case 0x08: /* udma mode */
1599                 put_le16(identify_data + 62, 0x07);
1600                 put_le16(identify_data + 63, 0x07);
1601                 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1602                 break;
1603             default:
1604                 goto abort_cmd;
1605             }
1606             return true;
1607         }
1608     }
1609 
1610 abort_cmd:
1611     ide_abort_command(s);
1612     return true;
1613 }
1614 
1615 
1616 /*** ATAPI commands ***/
1617 
1618 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1619 {
1620     ide_atapi_identify(s);
1621     s->status = READY_STAT | SEEK_STAT;
1622     ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1623     ide_set_irq(s->bus);
1624     return false;
1625 }
1626 
1627 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1628 {
1629     ide_set_signature(s);
1630 
1631     if (s->drive_kind == IDE_CD) {
1632         s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1633                         * devices to return a clear status register
1634                         * with READY_STAT *not* set. */
1635         s->error = 0x01;
1636     } else {
1637         s->status = READY_STAT | SEEK_STAT;
1638         /* The bits of the error register are not as usual for this command!
1639          * They are part of the regular output (this is why ERR_STAT isn't set)
1640          * Device 0 passed, Device 1 passed or not present. */
1641         s->error = 0x01;
1642         ide_set_irq(s->bus);
1643     }
1644 
1645     return false;
1646 }
1647 
1648 static bool cmd_packet(IDEState *s, uint8_t cmd)
1649 {
1650     /* overlapping commands not supported */
1651     if (s->feature & 0x02) {
1652         ide_abort_command(s);
1653         return true;
1654     }
1655 
1656     s->status = READY_STAT | SEEK_STAT;
1657     s->atapi_dma = s->feature & 1;
1658     if (s->atapi_dma) {
1659         s->dma_cmd = IDE_DMA_ATAPI;
1660     }
1661     s->nsector = 1;
1662     ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1663                        ide_atapi_cmd);
1664     return false;
1665 }
1666 
1667 
1668 /*** CF-ATA commands ***/
1669 
1670 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1671 {
1672     s->error = 0x09;    /* miscellaneous error */
1673     s->status = READY_STAT | SEEK_STAT;
1674     ide_set_irq(s->bus);
1675 
1676     return false;
1677 }
1678 
1679 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1680 {
1681     /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1682      * required for Windows 8 to work with AHCI */
1683 
1684     if (cmd == CFA_WEAR_LEVEL) {
1685         s->nsector = 0;
1686     }
1687 
1688     if (cmd == CFA_ERASE_SECTORS) {
1689         s->media_changed = 1;
1690     }
1691 
1692     return true;
1693 }
1694 
1695 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1696 {
1697     s->status = READY_STAT | SEEK_STAT;
1698 
1699     memset(s->io_buffer, 0, 0x200);
1700     s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
1701     s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
1702     s->io_buffer[0x02] = s->select;                 /* Head */
1703     s->io_buffer[0x03] = s->sector;                 /* Sector */
1704     s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
1705     s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
1706     s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
1707     s->io_buffer[0x13] = 0x00;                      /* Erase flag */
1708     s->io_buffer[0x18] = 0x00;                      /* Hot count */
1709     s->io_buffer[0x19] = 0x00;                      /* Hot count */
1710     s->io_buffer[0x1a] = 0x01;                      /* Hot count */
1711 
1712     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1713     ide_set_irq(s->bus);
1714 
1715     return false;
1716 }
1717 
1718 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1719 {
1720     switch (s->feature) {
1721     case 0x02:  /* Inquiry Metadata Storage */
1722         ide_cfata_metadata_inquiry(s);
1723         break;
1724     case 0x03:  /* Read Metadata Storage */
1725         ide_cfata_metadata_read(s);
1726         break;
1727     case 0x04:  /* Write Metadata Storage */
1728         ide_cfata_metadata_write(s);
1729         break;
1730     default:
1731         ide_abort_command(s);
1732         return true;
1733     }
1734 
1735     ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1736     s->status = 0x00; /* NOTE: READY is _not_ set */
1737     ide_set_irq(s->bus);
1738 
1739     return false;
1740 }
1741 
1742 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1743 {
1744     switch (s->feature) {
1745     case 0x01:  /* sense temperature in device */
1746         s->nsector = 0x50;      /* +20 C */
1747         break;
1748     default:
1749         ide_abort_command(s);
1750         return true;
1751     }
1752 
1753     return true;
1754 }
1755 
1756 
1757 /*** SMART commands ***/
1758 
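     /* SMART subcommand dispatch: the guest must put the SMART key 0x4f/0xc2
      * into lcyl/hcyl, and the feature register selects the subcommand
      * (enable/disable, autosave, status, read data/thresholds/log,
      * execute off-line immediate). Unknown subcommands abort the command. */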
1759 static bool cmd_smart(IDEState *s, uint8_t cmd)
1760 {
1761     int n;
1762 
1763     if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1764         goto abort_cmd;
1765     }
1766 
1767     if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1768         goto abort_cmd;
1769     }
1770 
1771     switch (s->feature) {
1772     case SMART_DISABLE:
1773         s->smart_enabled = 0;
1774         return true;
1775 
1776     case SMART_ENABLE:
1777         s->smart_enabled = 1;
1778         return true;
1779 
1780     case SMART_ATTR_AUTOSAVE:
1781         switch (s->sector) {
1782         case 0x00:
1783             s->smart_autosave = 0;
1784             break;
1785         case 0xf1:
1786             s->smart_autosave = 1;
1787             break;
1788         default:
1789             goto abort_cmd;
1790         }
1791         return true;
1792 
1793     case SMART_STATUS:
1794         if (!s->smart_errors) {
1795             s->hcyl = 0xc2;
1796             s->lcyl = 0x4f;
1797         } else {
1798             s->hcyl = 0x2c;
1799             s->lcyl = 0xf4;
1800         }
1801         return true;
1802 
1803     case SMART_READ_THRESH:
1804         memset(s->io_buffer, 0, 0x200);
1805         s->io_buffer[0] = 0x01; /* smart struct version */
1806 
1807         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1808             s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1809             s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1810         }
1811 
1812         /* checksum */
1813         for (n = 0; n < 511; n++) {
1814             s->io_buffer[511] += s->io_buffer[n];
1815         }
1816         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1817 
1818         s->status = READY_STAT | SEEK_STAT;
1819         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1820         ide_set_irq(s->bus);
1821         return false;
1822 
1823     case SMART_READ_DATA:
1824         memset(s->io_buffer, 0, 0x200);
1825         s->io_buffer[0] = 0x01; /* smart struct version */
1826 
1827         for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1828             int i;
1829             for (i = 0; i < 11; i++) {
1830                 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1831             }
1832         }
1833 
1834         s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1835         if (s->smart_selftest_count == 0) {
1836             s->io_buffer[363] = 0;
1837         } else {
1838             s->io_buffer[363] =
1839                 s->smart_selftest_data[3 +
1840                            (s->smart_selftest_count - 1) *
1841                            24];
1842         }
1843         s->io_buffer[364] = 0x20;
1844         s->io_buffer[365] = 0x01;
1845         /* off-line data collection capability: execute + self-test */
1846         s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1847         s->io_buffer[368] = 0x03; /* smart capability (1) */
1848         s->io_buffer[369] = 0x00; /* smart capability (2) */
1849         s->io_buffer[370] = 0x01; /* error logging supported */
1850         s->io_buffer[372] = 0x02; /* minutes for poll short test */
1851         s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1852         s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1853 
1854         for (n = 0; n < 511; n++) {
1855             s->io_buffer[511] += s->io_buffer[n];
1856         }
1857         s->io_buffer[511] = 0x100 - s->io_buffer[511];
1858 
1859         s->status = READY_STAT | SEEK_STAT;
1860         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1861         ide_set_irq(s->bus);
1862         return false;
1863 
1864     case SMART_READ_LOG:
1865         switch (s->sector) {
1866         case 0x01: /* summary smart error log */
1867             memset(s->io_buffer, 0, 0x200);
1868             s->io_buffer[0] = 0x01;
1869             s->io_buffer[1] = 0x00; /* no error entries */
1870             s->io_buffer[452] = s->smart_errors & 0xff;
1871             s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1872 
1873             for (n = 0; n < 511; n++) {
1874                 s->io_buffer[511] += s->io_buffer[n];
1875             }
1876             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1877             break;
1878         case 0x06: /* smart self test log */
1879             memset(s->io_buffer, 0, 0x200);
1880             s->io_buffer[0] = 0x01;
1881             if (s->smart_selftest_count == 0) {
1882                 s->io_buffer[508] = 0;
1883             } else {
1884                 s->io_buffer[508] = s->smart_selftest_count;
1885                 for (n = 2; n < 506; n++)  {
1886                     s->io_buffer[n] = s->smart_selftest_data[n];
1887                 }
1888             }
1889 
1890             for (n = 0; n < 511; n++) {
1891                 s->io_buffer[511] += s->io_buffer[n];
1892             }
1893             s->io_buffer[511] = 0x100 - s->io_buffer[511];
1894             break;
1895         default:
1896             goto abort_cmd;
1897         }
1898         s->status = READY_STAT | SEEK_STAT;
1899         ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1900         ide_set_irq(s->bus);
1901         return false;
1902 
1903     case SMART_EXECUTE_OFFLINE:
1904         switch (s->sector) {
1905         case 0: /* off-line routine */
1906         case 1: /* short self test */
1907         case 2: /* extended self test */
1908             s->smart_selftest_count++;
1909             if (s->smart_selftest_count > 21) {
1910                 s->smart_selftest_count = 1;
1911             }
1912             n = 2 + (s->smart_selftest_count - 1) * 24;
1913             s->smart_selftest_data[n] = s->sector;
1914             s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1915             s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1916             s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1917             break;
1918         default:
1919             goto abort_cmd;
1920         }
1921         return true;
1922     }
1923 
1924 abort_cmd:
1925     ide_abort_command(s);
1926     return true;
1927 }
1928 
1929 #define HD_OK (1u << IDE_HD)
1930 #define CD_OK (1u << IDE_CD)
1931 #define CFA_OK (1u << IDE_CFATA)
1932 #define HD_CFA_OK (HD_OK | CFA_OK)
1933 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1934 
1935 /* Set the Disk Seek Completed status bit during completion */
1936 #define SET_DSC (1u << 8)
1937 
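     /* The low bits of each entry's flags are the *_OK drive-kind masks above,
      * checked by ide_cmd_permitted() as (1u << drive_kind); SET_DSC may be
      * OR'ed in as well. */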
1938 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1939 static const struct {
1940     /* Returns true if the completion code should be run */
1941     bool (*handler)(IDEState *s, uint8_t cmd);
1942     int flags;
1943 } ide_cmd_table[0x100] = {
1944     /* NOP not implemented, mandatory for CD */
1945     [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
1946     [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
1947     [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
1948     [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC },
1949     [WIN_READ]                    = { cmd_read_pio, ALL_OK },
1950     [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
1951     [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
1952     [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
1953     [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1954     [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
1955     [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
1956     [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
1957     [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
1958     [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
1959     [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
1960     [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
1961     [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
1962     [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
1963     [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
1964     [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
1965     [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
1966     [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
1967     [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
1968     [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
1969     [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
1970     [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
1971     [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
1972     [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
1973     [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1974     [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
1975     [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
1976     [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
1977     [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
1978     [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
1979     [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
1980     [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
1981     [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
1982     [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
1983     [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
1984     [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
1985     [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
1986     [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
1987     [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
1988     [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
1989     [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
1990     [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
1991     [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
1992     [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1993     [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
1994     [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
1995     [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
1996     [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
1997     [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
1998     [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
1999     [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
2000     [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
2001 };
2002 
2003 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2004 {
2005     return cmd < ARRAY_SIZE(ide_cmd_table)
2006         && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2007 }
2008 
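     /* Dispatch a command written to the command register: while BSY or DRQ is
      * set only DEVICE RESET to an ATAPI device is accepted, unknown or
      * unsupported commands are aborted, and if the handler returns true the
      * command is completed here (status, optional DSC bit, IRQ). */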
2009 void ide_exec_cmd(IDEBus *bus, uint32_t val)
2010 {
2011     IDEState *s;
2012     bool complete;
2013 
2014 #ifdef DEBUG_IDE
2015     printf("ide: CMD=%02x\n", val);
2016 #endif
2017     s = idebus_active_if(bus);
2018     /* ignore commands to a non-existent slave */
2019     if (s != bus->ifs && !s->blk) {
2020         return;
2021     }
2022 
2023     /* Only RESET is allowed while BSY and/or DRQ are set,
2024      * and only to ATAPI devices. */
2025     if (s->status & (BUSY_STAT|DRQ_STAT)) {
2026         if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
2027             return;
2028         }
2029     }
2030 
2031     if (!ide_cmd_permitted(s, val)) {
2032         ide_abort_command(s);
2033         ide_set_irq(s->bus);
2034         return;
2035     }
2036 
2037     s->status = READY_STAT | BUSY_STAT;
2038     s->error = 0;
2039     s->io_buffer_offset = 0;
2040 
2041     complete = ide_cmd_table[val].handler(s, val);
2042     if (complete) {
2043         s->status &= ~BUSY_STAT;
2044         assert(!!s->error == !!(s->status & ERR_STAT));
2045 
2046         if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
2047             s->status |= SEEK_STAT;
2048         }
2049 
2050         ide_cmd_done(s);
2051         ide_set_irq(s->bus);
2052     }
2053 }
2054 
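     /* 8-bit reads of the task-file registers. Offset 0 is the data register,
      * which is only accessed 16/32 bits wide (see ide_data_read*), so it reads
      * back as 0xff here; reading the status register (offset 7) also lowers
      * the IRQ line. */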
2055 uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
2056 {
2057     IDEBus *bus = opaque;
2058     IDEState *s = idebus_active_if(bus);
2059     uint32_t addr;
2060     int ret, hob;
2061 
2062     addr = addr1 & 7;
2063     /* FIXME: HOB readback uses bit 7, but it's always set right now */
2064     //hob = s->select & (1 << 7);
2065     hob = 0;
2066     switch (addr) {
2067     case 0:
2068         ret = 0xff;
2069         break;
2070     case 1:
2071         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2072             (s != bus->ifs && !s->blk)) {
2073             ret = 0;
2074         } else if (!hob) {
2075             ret = s->error;
2076         } else {
2077             ret = s->hob_feature;
2078         }
2079         break;
2080     case 2:
2081         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2082             ret = 0;
2083         } else if (!hob) {
2084             ret = s->nsector & 0xff;
2085         } else {
2086             ret = s->hob_nsector;
2087         }
2088         break;
2089     case 3:
2090         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2091             ret = 0;
2092         } else if (!hob) {
2093             ret = s->sector;
2094         } else {
2095             ret = s->hob_sector;
2096         }
2097         break;
2098     case 4:
2099         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2100             ret = 0;
2101         } else if (!hob) {
2102             ret = s->lcyl;
2103         } else {
2104             ret = s->hob_lcyl;
2105         }
2106         break;
2107     case 5:
2108         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2109             ret = 0;
2110         } else if (!hob) {
2111             ret = s->hcyl;
2112         } else {
2113             ret = s->hob_hcyl;
2114         }
2115         break;
2116     case 6:
2117         if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
2118             ret = 0;
2119         } else {
2120             ret = s->select;
2121         }
2122         break;
2123     default:
2124     case 7:
2125         if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2126             (s != bus->ifs && !s->blk)) {
2127             ret = 0;
2128         } else {
2129             ret = s->status;
2130         }
2131         qemu_irq_lower(bus->irq);
2132         break;
2133     }
2134 #ifdef DEBUG_IDE
2135     printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
2136 #endif
2137     return ret;
2138 }
2139 
2140 uint32_t ide_status_read(void *opaque, uint32_t addr)
2141 {
2142     IDEBus *bus = opaque;
2143     IDEState *s = idebus_active_if(bus);
2144     int ret;
2145 
2146     if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2147         (s != bus->ifs && !s->blk)) {
2148         ret = 0;
2149     } else {
2150         ret = s->status;
2151     }
2152 #ifdef DEBUG_IDE
2153     printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
2154 #endif
2155     return ret;
2156 }
2157 
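     /* Write to the device control register, shared by both drives: a rising
      * edge of IDE_CMD_RESET puts both drives into BUSY, a falling edge
      * completes the reset and restores the drive signature. */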
2158 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2159 {
2160     IDEBus *bus = opaque;
2161     IDEState *s;
2162     int i;
2163 
2164 #ifdef DEBUG_IDE
2165     printf("ide: write control addr=0x%x val=%02x\n", addr, val);
2166 #endif
2167     /* common for both drives */
2168     if (!(bus->cmd & IDE_CMD_RESET) &&
2169         (val & IDE_CMD_RESET)) {
2170         /* reset low to high */
2171         for (i = 0; i < 2; i++) {
2172             s = &bus->ifs[i];
2173             s->status = BUSY_STAT | SEEK_STAT;
2174             s->error = 0x01;
2175         }
2176     } else if ((bus->cmd & IDE_CMD_RESET) &&
2177                !(val & IDE_CMD_RESET)) {
2178         /* high to low */
2179         for (i = 0; i < 2; i++) {
2180             s = &bus->ifs[i];
2181             if (s->drive_kind == IDE_CD)
2182                 s->status = 0x00; /* NOTE: READY is _not_ set */
2183             else
2184                 s->status = READY_STAT | SEEK_STAT;
2185             ide_set_signature(s);
2186         }
2187     }
2188 
2189     bus->cmd = val;
2190 }
2191 
2192 /*
2193  * Returns true if the running PIO transfer is a PIO out (i.e. data is
2194  * transferred from the device to the guest), false if it's a PIO in
2195  */
2196 static bool ide_is_pio_out(IDEState *s)
2197 {
2198     if (s->end_transfer_func == ide_sector_write ||
2199         s->end_transfer_func == ide_atapi_cmd) {
2200         return false;
2201     } else if (s->end_transfer_func == ide_sector_read ||
2202                s->end_transfer_func == ide_transfer_stop ||
2203                s->end_transfer_func == ide_atapi_cmd_reply_end ||
2204                s->end_transfer_func == ide_dummy_transfer_stop) {
2205         return true;
2206     }
2207 
2208     abort();
2209 }
2210 
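     /* 16/32-bit accessors for the PIO data port: each access advances
      * data_ptr and, once data_end is reached, clears DRQ and calls the
      * current end_transfer_func. Accesses in the wrong direction or without
      * DRQ set are ignored. */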
2211 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2212 {
2213     IDEBus *bus = opaque;
2214     IDEState *s = idebus_active_if(bus);
2215     uint8_t *p;
2216 
2217     /* PIO data access allowed only when DRQ bit is set. The result of a write
2218      * during PIO out is indeterminate, just ignore it. */
2219     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2220         return;
2221     }
2222 
2223     p = s->data_ptr;
2224     if (p + 2 > s->data_end) {
2225         return;
2226     }
2227 
2228     *(uint16_t *)p = le16_to_cpu(val);
2229     p += 2;
2230     s->data_ptr = p;
2231     if (p >= s->data_end) {
2232         s->status &= ~DRQ_STAT;
2233         s->end_transfer_func(s);
2234     }
2235 }
2236 
2237 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2238 {
2239     IDEBus *bus = opaque;
2240     IDEState *s = idebus_active_if(bus);
2241     uint8_t *p;
2242     int ret;
2243 
2244     /* PIO data access allowed only when DRQ bit is set. The result of a read
2245      * during PIO in is indeterminate, return 0 and don't move forward. */
2246     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2247         return 0;
2248     }
2249 
2250     p = s->data_ptr;
2251     if (p + 2 > s->data_end) {
2252         return 0;
2253     }
2254 
2255     ret = cpu_to_le16(*(uint16_t *)p);
2256     p += 2;
2257     s->data_ptr = p;
2258     if (p >= s->data_end) {
2259         s->status &= ~DRQ_STAT;
2260         s->end_transfer_func(s);
2261     }
2262     return ret;
2263 }
2264 
2265 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2266 {
2267     IDEBus *bus = opaque;
2268     IDEState *s = idebus_active_if(bus);
2269     uint8_t *p;
2270 
2271     /* PIO data access allowed only when DRQ bit is set. The result of a write
2272      * during PIO out is indeterminate, just ignore it. */
2273     if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2274         return;
2275     }
2276 
2277     p = s->data_ptr;
2278     if (p + 4 > s->data_end) {
2279         return;
2280     }
2281 
2282     *(uint32_t *)p = le32_to_cpu(val);
2283     p += 4;
2284     s->data_ptr = p;
2285     if (p >= s->data_end) {
2286         s->status &= ~DRQ_STAT;
2287         s->end_transfer_func(s);
2288     }
2289 }
2290 
2291 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2292 {
2293     IDEBus *bus = opaque;
2294     IDEState *s = idebus_active_if(bus);
2295     uint8_t *p;
2296     int ret;
2297 
2298     /* PIO data access allowed only when DRQ bit is set. The result of a read
2299      * during PIO in is indeterminate, return 0 and don't move forward. */
2300     if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2301         return 0;
2302     }
2303 
2304     p = s->data_ptr;
2305     if (p + 4 > s->data_end) {
2306         return 0;
2307     }
2308 
2309     ret = cpu_to_le32(*(uint32_t *)p);
2310     p += 4;
2311     s->data_ptr = p;
2312     if (p >= s->data_end) {
2313         s->status &= ~DRQ_STAT;
2314         s->end_transfer_func(s);
2315     }
2316     return ret;
2317 }
2318 
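     /* Default/dummy end_transfer_func: reset the data pointers to an empty
      * buffer filled with 0xff so that stray PIO accesses are harmless. */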
2319 static void ide_dummy_transfer_stop(IDEState *s)
2320 {
2321     s->data_ptr = s->io_buffer;
2322     s->data_end = s->io_buffer;
2323     s->io_buffer[0] = 0xff;
2324     s->io_buffer[1] = 0xff;
2325     s->io_buffer[2] = 0xff;
2326     s->io_buffer[3] = 0xff;
2327 }
2328 
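     /* Reset both drives on the bus, cancel any in-flight DMA request and
      * reset the DMA provider itself. */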
2329 void ide_bus_reset(IDEBus *bus)
2330 {
2331     bus->unit = 0;
2332     bus->cmd = 0;
2333     ide_reset(&bus->ifs[0]);
2334     ide_reset(&bus->ifs[1]);
2335     ide_clear_hob(bus);
2336 
2337     /* pending async DMA */
2338     if (bus->dma->aiocb) {
2339 #ifdef DEBUG_AIO
2340         printf("aio_cancel\n");
2341 #endif
2342         blk_aio_cancel(bus->dma->aiocb);
2343         bus->dma->aiocb = NULL;
2344     }
2345 
2346     /* reset dma provider too */
2347     if (bus->dma->ops->reset) {
2348         bus->dma->ops->reset(bus->dma);
2349     }
2350 }
2351 
2352 static bool ide_cd_is_tray_open(void *opaque)
2353 {
2354     return ((IDEState *)opaque)->tray_open;
2355 }
2356 
2357 static bool ide_cd_is_medium_locked(void *opaque)
2358 {
2359     return ((IDEState *)opaque)->tray_locked;
2360 }
2361 
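     /* Block backend resize callback for hard disks and CF cards: refresh
      * nb_sectors and the capacity words of the cached IDENTIFY data. */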
2362 static void ide_resize_cb(void *opaque)
2363 {
2364     IDEState *s = opaque;
2365     uint64_t nb_sectors;
2366 
2367     if (!s->identify_set) {
2368         return;
2369     }
2370 
2371     blk_get_geometry(s->blk, &nb_sectors);
2372     s->nb_sectors = nb_sectors;
2373 
2374     /* Update the identify data buffer. */
2375     if (s->drive_kind == IDE_CFATA) {
2376         ide_cfata_identify_size(s);
2377     } else {
2378         /* IDE_CD uses a different set of callbacks entirely. */
2379         assert(s->drive_kind != IDE_CD);
2380         ide_identify_size(s);
2381     }
2382 }
2383 
2384 static const BlockDevOps ide_cd_block_ops = {
2385     .change_media_cb = ide_cd_change_cb,
2386     .eject_request_cb = ide_cd_eject_request_cb,
2387     .is_tray_open = ide_cd_is_tray_open,
2388     .is_medium_locked = ide_cd_is_medium_locked,
2389 };
2390 
2391 static const BlockDevOps ide_hd_block_ops = {
2392     .resize_cb = ide_resize_cb,
2393 };
2394 
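     /* Configure an IDEState from a block backend, geometry and ID strings.
      * Returns 0 on success, or -1 if a disk/CF drive has no medium or is
      * read-only. */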
2395 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2396                    const char *version, const char *serial, const char *model,
2397                    uint64_t wwn,
2398                    uint32_t cylinders, uint32_t heads, uint32_t secs,
2399                    int chs_trans)
2400 {
2401     uint64_t nb_sectors;
2402 
2403     s->blk = blk;
2404     s->drive_kind = kind;
2405 
2406     blk_get_geometry(blk, &nb_sectors);
2407     s->cylinders = cylinders;
2408     s->heads = heads;
2409     s->sectors = secs;
2410     s->chs_trans = chs_trans;
2411     s->nb_sectors = nb_sectors;
2412     s->wwn = wwn;
2413     /* The SMART values should be preserved across power cycles
2414        but they aren't.  */
2415     s->smart_enabled = 1;
2416     s->smart_autosave = 1;
2417     s->smart_errors = 0;
2418     s->smart_selftest_count = 0;
2419     if (kind == IDE_CD) {
2420         blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2421         blk_set_guest_block_size(blk, 2048);
2422     } else {
2423         if (!blk_is_inserted(s->blk)) {
2424             error_report("Device needs media, but drive is empty");
2425             return -1;
2426         }
2427         if (blk_is_read_only(blk)) {
2428             error_report("Can't use a read-only drive");
2429             return -1;
2430         }
2431         blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2432     }
2433     if (serial) {
2434         pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2435     } else {
2436         snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2437                  "QM%05d", s->drive_serial);
2438     }
2439     if (model) {
2440         pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2441     } else {
2442         switch (kind) {
2443         case IDE_CD:
2444             strcpy(s->drive_model_str, "QEMU DVD-ROM");
2445             break;
2446         case IDE_CFATA:
2447             strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2448             break;
2449         default:
2450             strcpy(s->drive_model_str, "QEMU HARDDISK");
2451             break;
2452         }
2453     }
2454 
2455     if (version) {
2456         pstrcpy(s->version, sizeof(s->version), version);
2457     } else {
2458         pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2459     }
2460 
2461     ide_reset(s);
2462     blk_iostatus_enable(blk);
2463     return 0;
2464 }
2465 
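     /* Per-drive initialization common to both units of a bus: allocate the
      * (2k-aligned) I/O buffer and the SMART self-test buffer, and create the
      * sector write timer. */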
2466 static void ide_init1(IDEBus *bus, int unit)
2467 {
2468     static int drive_serial = 1;
2469     IDEState *s = &bus->ifs[unit];
2470 
2471     s->bus = bus;
2472     s->unit = unit;
2473     s->drive_serial = drive_serial++;
2474     /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2475     s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2476     s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2477     memset(s->io_buffer, 0, s->io_buffer_total_len);
2478 
2479     s->smart_selftest_data = blk_blockalign(s->blk, 512);
2480     memset(s->smart_selftest_data, 0, 512);
2481 
2482     s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2483                                            ide_sector_write_timer_cb, s);
2484 }
2485 
2486 static int ide_nop_int(IDEDMA *dma, int x)
2487 {
2488     return 0;
2489 }
2490 
2491 static void ide_nop(IDEDMA *dma)
2492 {
2493 }
2494 
2495 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2496 {
2497     return 0;
2498 }
2499 
2500 static const IDEDMAOps ide_dma_nop_ops = {
2501     .prepare_buf    = ide_nop_int32,
2502     .restart_dma    = ide_nop,
2503     .rw_buf         = ide_nop_int,
2504 };
2505 
2506 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2507 {
2508     s->unit = s->bus->retry_unit;
2509     ide_set_sector(s, s->bus->retry_sector_num);
2510     s->nsector = s->bus->retry_nsector;
2511     s->bus->dma->ops->restart_dma(s->bus->dma);
2512     s->io_buffer_size = 0;
2513     s->dma_cmd = dma_cmd;
2514     ide_start_dma(s, ide_dma_cb);
2515 }
2516 
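     /* Bottom half scheduled from the vm-change-state handler: if the bus has
      * a pending error status it is cleared and the interrupted DMA, PIO,
      * flush or ATAPI request is re-submitted. */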
2517 static void ide_restart_bh(void *opaque)
2518 {
2519     IDEBus *bus = opaque;
2520     IDEState *s;
2521     bool is_read;
2522     int error_status;
2523 
2524     qemu_bh_delete(bus->bh);
2525     bus->bh = NULL;
2526 
2527     error_status = bus->error_status;
2528     if (bus->error_status == 0) {
2529         return;
2530     }
2531 
2532     s = idebus_active_if(bus);
2533     is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2534 
2535     /* The error status must be cleared before resubmitting the request: The
2536      * request may fail again, and this case can only be distinguished if the
2537      * called function can set a new error status. */
2538     bus->error_status = 0;
2539 
2540     /* The HBA has generically asked to be kicked on retry */
2541     if (error_status & IDE_RETRY_HBA) {
2542         if (s->bus->dma->ops->restart) {
2543             s->bus->dma->ops->restart(s->bus->dma);
2544         }
2545     } else if (IS_IDE_RETRY_DMA(error_status)) {
2546         if (error_status & IDE_RETRY_TRIM) {
2547             ide_restart_dma(s, IDE_DMA_TRIM);
2548         } else {
2549             ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2550         }
2551     } else if (IS_IDE_RETRY_PIO(error_status)) {
2552         if (is_read) {
2553             ide_sector_read(s);
2554         } else {
2555             ide_sector_write(s);
2556         }
2557     } else if (error_status & IDE_RETRY_FLUSH) {
2558         ide_flush_cache(s);
2559     } else if (IS_IDE_RETRY_ATAPI(error_status)) {
2560         assert(s->end_transfer_func == ide_atapi_cmd);
2561         ide_atapi_dma_restart(s);
2562     } else {
2563         abort();
2564     }
2565 }
2566 
2567 static void ide_restart_cb(void *opaque, int running, RunState state)
2568 {
2569     IDEBus *bus = opaque;
2570 
2571     if (!running)
2572         return;
2573 
2574     if (!bus->bh) {
2575         bus->bh = qemu_bh_new(ide_restart_bh, bus);
2576         qemu_bh_schedule(bus->bh);
2577     }
2578 }
2579 
2580 void ide_register_restart_cb(IDEBus *bus)
2581 {
2582     if (bus->dma->ops->restart_dma) {
2583         qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2584     }
2585 }
2586 
2587 static IDEDMA ide_dma_nop = {
2588     .ops = &ide_dma_nop_ops,
2589     .aiocb = NULL,
2590 };
2591 
2592 void ide_init2(IDEBus *bus, qemu_irq irq)
2593 {
2594     int i;
2595 
2596     for (i = 0; i < 2; i++) {
2597         ide_init1(bus, i);
2598         ide_reset(&bus->ifs[i]);
2599     }
2600     bus->irq = irq;
2601     bus->dma = &ide_dma_nop;
2602 }
2603 
2604 static const MemoryRegionPortio ide_portio_list[] = {
2605     { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2606     { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2607     { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2608     PORTIO_END_OF_LIST(),
2609 };
2610 
2611 static const MemoryRegionPortio ide_portio2_list[] = {
2612     { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2613     PORTIO_END_OF_LIST(),
2614 };
2615 
2616 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2617 {
2618     /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2619        bridge has been set up properly to always register with ISA.  */
2620     isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
2621 
2622     if (iobase2) {
2623         isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
2624     }
2625 }
2626 
2627 static bool is_identify_set(void *opaque, int version_id)
2628 {
2629     IDEState *s = opaque;
2630 
2631     return s->identify_set != 0;
2632 }
2633 
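     /* Possible values of end_transfer_func, so that the active one can be
      * migrated as an index (see ide_drive_pio_pre_save/post_load). */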
2634 static EndTransferFunc* transfer_end_table[] = {
2635         ide_sector_read,
2636         ide_sector_write,
2637         ide_transfer_stop,
2638         ide_atapi_cmd_reply_end,
2639         ide_atapi_cmd,
2640         ide_dummy_transfer_stop,
2641 };
2642 
2643 static int transfer_end_table_idx(EndTransferFunc *fn)
2644 {
2645     int i;
2646 
2647     for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2648         if (transfer_end_table[i] == fn)
2649             return i;
2650 
2651     return -1;
2652 }
2653 
2654 static int ide_drive_post_load(void *opaque, int version_id)
2655 {
2656     IDEState *s = opaque;
2657 
2658     if (s->blk && s->identify_set) {
2659         blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2660     }
2661     return 0;
2662 }
2663 
2664 static int ide_drive_pio_post_load(void *opaque, int version_id)
2665 {
2666     IDEState *s = opaque;
2667 
2668     if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2669         return -EINVAL;
2670     }
2671     s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2672     s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2673     s->data_end = s->data_ptr + s->cur_io_buffer_len;
2674     s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2675 
2676     return 0;
2677 }
2678 
2679 static void ide_drive_pio_pre_save(void *opaque)
2680 {
2681     IDEState *s = opaque;
2682     int idx;
2683 
2684     s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2685     s->cur_io_buffer_len = s->data_end - s->data_ptr;
2686 
2687     idx = transfer_end_table_idx(s->end_transfer_func);
2688     if (idx == -1) {
2689         fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2690                         __func__);
2691         s->end_transfer_fn_idx = 2;
2692     } else {
2693         s->end_transfer_fn_idx = idx;
2694     }
2695 }
2696 
2697 static bool ide_drive_pio_state_needed(void *opaque)
2698 {
2699     IDEState *s = opaque;
2700 
2701     return ((s->status & DRQ_STAT) != 0)
2702         || (s->bus->error_status & IDE_RETRY_PIO);
2703 }
2704 
2705 static bool ide_tray_state_needed(void *opaque)
2706 {
2707     IDEState *s = opaque;
2708 
2709     return s->tray_open || s->tray_locked;
2710 }
2711 
2712 static bool ide_atapi_gesn_needed(void *opaque)
2713 {
2714     IDEState *s = opaque;
2715 
2716     return s->events.new_media || s->events.eject_request;
2717 }
2718 
2719 static bool ide_error_needed(void *opaque)
2720 {
2721     IDEBus *bus = opaque;
2722 
2723     return (bus->error_status != 0);
2724 }
2725 
2726 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2727 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2728     .name ="ide_drive/atapi/gesn_state",
2729     .version_id = 1,
2730     .minimum_version_id = 1,
2731     .needed = ide_atapi_gesn_needed,
2732     .fields = (VMStateField[]) {
2733         VMSTATE_BOOL(events.new_media, IDEState),
2734         VMSTATE_BOOL(events.eject_request, IDEState),
2735         VMSTATE_END_OF_LIST()
2736     }
2737 };
2738 
2739 static const VMStateDescription vmstate_ide_tray_state = {
2740     .name = "ide_drive/tray_state",
2741     .version_id = 1,
2742     .minimum_version_id = 1,
2743     .needed = ide_tray_state_needed,
2744     .fields = (VMStateField[]) {
2745         VMSTATE_BOOL(tray_open, IDEState),
2746         VMSTATE_BOOL(tray_locked, IDEState),
2747         VMSTATE_END_OF_LIST()
2748     }
2749 };
2750 
2751 static const VMStateDescription vmstate_ide_drive_pio_state = {
2752     .name = "ide_drive/pio_state",
2753     .version_id = 1,
2754     .minimum_version_id = 1,
2755     .pre_save = ide_drive_pio_pre_save,
2756     .post_load = ide_drive_pio_post_load,
2757     .needed = ide_drive_pio_state_needed,
2758     .fields = (VMStateField[]) {
2759         VMSTATE_INT32(req_nb_sectors, IDEState),
2760         VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2761 			     vmstate_info_uint8, uint8_t),
2762         VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2763         VMSTATE_INT32(cur_io_buffer_len, IDEState),
2764         VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2765         VMSTATE_INT32(elementary_transfer_size, IDEState),
2766         VMSTATE_INT32(packet_transfer_size, IDEState),
2767         VMSTATE_END_OF_LIST()
2768     }
2769 };
2770 
2771 const VMStateDescription vmstate_ide_drive = {
2772     .name = "ide_drive",
2773     .version_id = 3,
2774     .minimum_version_id = 0,
2775     .post_load = ide_drive_post_load,
2776     .fields = (VMStateField[]) {
2777         VMSTATE_INT32(mult_sectors, IDEState),
2778         VMSTATE_INT32(identify_set, IDEState),
2779         VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2780         VMSTATE_UINT8(feature, IDEState),
2781         VMSTATE_UINT8(error, IDEState),
2782         VMSTATE_UINT32(nsector, IDEState),
2783         VMSTATE_UINT8(sector, IDEState),
2784         VMSTATE_UINT8(lcyl, IDEState),
2785         VMSTATE_UINT8(hcyl, IDEState),
2786         VMSTATE_UINT8(hob_feature, IDEState),
2787         VMSTATE_UINT8(hob_sector, IDEState),
2788         VMSTATE_UINT8(hob_nsector, IDEState),
2789         VMSTATE_UINT8(hob_lcyl, IDEState),
2790         VMSTATE_UINT8(hob_hcyl, IDEState),
2791         VMSTATE_UINT8(select, IDEState),
2792         VMSTATE_UINT8(status, IDEState),
2793         VMSTATE_UINT8(lba48, IDEState),
2794         VMSTATE_UINT8(sense_key, IDEState),
2795         VMSTATE_UINT8(asc, IDEState),
2796         VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2797         VMSTATE_END_OF_LIST()
2798     },
2799     .subsections = (const VMStateDescription*[]) {
2800         &vmstate_ide_drive_pio_state,
2801         &vmstate_ide_tray_state,
2802         &vmstate_ide_atapi_gesn_state,
2803         NULL
2804     }
2805 };
2806 
2807 static const VMStateDescription vmstate_ide_error_status = {
2808     .name = "ide_bus/error",
2809     .version_id = 2,
2810     .minimum_version_id = 1,
2811     .needed = ide_error_needed,
2812     .fields = (VMStateField[]) {
2813         VMSTATE_INT32(error_status, IDEBus),
2814         VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2815         VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2816         VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2817         VMSTATE_END_OF_LIST()
2818     }
2819 };
2820 
2821 const VMStateDescription vmstate_ide_bus = {
2822     .name = "ide_bus",
2823     .version_id = 1,
2824     .minimum_version_id = 1,
2825     .fields = (VMStateField[]) {
2826         VMSTATE_UINT8(cmd, IDEBus),
2827         VMSTATE_UINT8(unit, IDEBus),
2828         VMSTATE_END_OF_LIST()
2829     },
2830     .subsections = (const VMStateDescription*[]) {
2831         &vmstate_ide_error_status,
2832         NULL
2833     }
2834 };
2835 
2836 void ide_drive_get(DriveInfo **hd, int n)
2837 {
2838     int i;
2839     int highest_bus = drive_get_max_bus(IF_IDE) + 1;
2840     int max_devs = drive_get_max_devs(IF_IDE);
2841     int n_buses = max_devs ? (n / max_devs) : n;
2842 
2843     /*
2844      * Note: The number of actual buses available is not known.
2845      * We compute this based on the size of the DriveInfo* array, n.
2846      * If it is less than max_devs * <num_real_buses>,
2847      * we will stop looking for drives prematurely instead of overfilling
2848      * the array.
2849      */
2850 
2851     if (highest_bus > n_buses) {
2852         error_report("Too many IDE buses defined (%d > %d)",
2853                      highest_bus, n_buses);
2854         exit(1);
2855     }
2856 
2857     for (i = 0; i < n; i++) {
2858         hd[i] = drive_get_by_index(IF_IDE, i);
2859     }
2860 }
2861