2 * QEMU IDE disk and CD/DVD-ROM Emulator
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "qemu/osdep.h"
27 #include "hw/isa/isa.h"
28 #include "migration/vmstate.h"
29 #include "qemu/error-report.h"
30 #include "qemu/main-loop.h"
31 #include "qemu/timer.h"
32 #include "qemu/hw-version.h"
33 #include "qemu/memalign.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/blockdev.h"
36 #include "sysemu/dma.h"
37 #include "hw/block/block.h"
38 #include "sysemu/block-backend.h"
39 #include "qapi/error.h"
40 #include "qemu/cutils.h"
41 #include "sysemu/replay.h"
42 #include "sysemu/runstate.h"
43 #include "hw/ide/internal.h"
46 /* These values were based on a Seagate ST3500418AS but have been modified
47 to make more sense in QEMU */
/*
 * S.M.A.R.T. attribute table.  Values were loosely based on a Seagate
 * ST3500418AS and then adjusted to make more sense under emulation.
 *
 * Each row: id, flags, hflags, value, worst, raw (6 bytes), threshold.
 */
static const int smart_attributes[][12] = {
    /* 0x01: raw read error rate */
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06 },
    /* 0x03: spin-up time */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
    /* 0x04: start/stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14 },
    /* 0x05: remapped (reallocated) sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24 },
    /* 0x09: power-on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
    /* 0x0c: power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
    /* 190: airflow temperature (celsius) */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32 },
};
66 const char *IDE_DMA_CMD_lookup
[IDE_DMA__COUNT
] = {
67 [IDE_DMA_READ
] = "DMA READ",
68 [IDE_DMA_WRITE
] = "DMA WRITE",
69 [IDE_DMA_TRIM
] = "DMA TRIM",
70 [IDE_DMA_ATAPI
] = "DMA ATAPI"
73 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval
)
75 if ((unsigned)enval
< IDE_DMA__COUNT
) {
76 return IDE_DMA_CMD_lookup
[enval
];
78 return "DMA UNKNOWN CMD";
81 static void ide_dummy_transfer_stop(IDEState
*s
);
83 static void padstr(char *str
, const char *src
, int len
)
86 for(i
= 0; i
< len
; i
++) {
95 static void put_le16(uint16_t *p
, unsigned int v
)
100 static void ide_identify_size(IDEState
*s
)
102 uint16_t *p
= (uint16_t *)s
->identify_data
;
103 int64_t nb_sectors_lba28
= s
->nb_sectors
;
104 if (nb_sectors_lba28
>= 1 << 28) {
105 nb_sectors_lba28
= (1 << 28) - 1;
107 put_le16(p
+ 60, nb_sectors_lba28
);
108 put_le16(p
+ 61, nb_sectors_lba28
>> 16);
109 put_le16(p
+ 100, s
->nb_sectors
);
110 put_le16(p
+ 101, s
->nb_sectors
>> 16);
111 put_le16(p
+ 102, s
->nb_sectors
>> 32);
112 put_le16(p
+ 103, s
->nb_sectors
>> 48);
115 static void ide_identify(IDEState
*s
)
118 unsigned int oldsize
;
119 IDEDevice
*dev
= s
->unit
? s
->bus
->slave
: s
->bus
->master
;
121 p
= (uint16_t *)s
->identify_data
;
122 if (s
->identify_set
) {
125 memset(p
, 0, sizeof(s
->identify_data
));
127 put_le16(p
+ 0, 0x0040);
128 put_le16(p
+ 1, s
->cylinders
);
129 put_le16(p
+ 3, s
->heads
);
130 put_le16(p
+ 4, 512 * s
->sectors
); /* XXX: retired, remove ? */
131 put_le16(p
+ 5, 512); /* XXX: retired, remove ? */
132 put_le16(p
+ 6, s
->sectors
);
133 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
134 put_le16(p
+ 20, 3); /* XXX: retired, remove ? */
135 put_le16(p
+ 21, 512); /* cache size in sectors */
136 put_le16(p
+ 22, 4); /* ecc bytes */
137 padstr((char *)(p
+ 23), s
->version
, 8); /* firmware version */
138 padstr((char *)(p
+ 27), s
->drive_model_str
, 40); /* model */
139 #if MAX_MULT_SECTORS > 1
140 put_le16(p
+ 47, 0x8000 | MAX_MULT_SECTORS
);
142 put_le16(p
+ 48, 1); /* dword I/O */
143 put_le16(p
+ 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
144 put_le16(p
+ 51, 0x200); /* PIO transfer cycle */
145 put_le16(p
+ 52, 0x200); /* DMA transfer cycle */
146 put_le16(p
+ 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
147 put_le16(p
+ 54, s
->cylinders
);
148 put_le16(p
+ 55, s
->heads
);
149 put_le16(p
+ 56, s
->sectors
);
150 oldsize
= s
->cylinders
* s
->heads
* s
->sectors
;
151 put_le16(p
+ 57, oldsize
);
152 put_le16(p
+ 58, oldsize
>> 16);
154 put_le16(p
+ 59, 0x100 | s
->mult_sectors
);
155 /* *(p + 60) := nb_sectors -- see ide_identify_size */
156 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
157 put_le16(p
+ 62, 0x07); /* single word dma0-2 supported */
158 put_le16(p
+ 63, 0x07); /* mdma0-2 supported */
159 put_le16(p
+ 64, 0x03); /* pio3-4 supported */
160 put_le16(p
+ 65, 120);
161 put_le16(p
+ 66, 120);
162 put_le16(p
+ 67, 120);
163 put_le16(p
+ 68, 120);
164 if (dev
&& dev
->conf
.discard_granularity
) {
165 put_le16(p
+ 69, (1 << 14)); /* determinate TRIM behavior */
169 put_le16(p
+ 75, s
->ncq_queues
- 1);
171 put_le16(p
+ 76, (1 << 8));
174 put_le16(p
+ 80, 0xf0); /* ata3 -> ata6 supported */
175 put_le16(p
+ 81, 0x16); /* conforms to ata5 */
176 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
177 put_le16(p
+ 82, (1 << 14) | (1 << 5) | 1);
178 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
179 put_le16(p
+ 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
180 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
182 put_le16(p
+ 84, (1 << 14) | (1 << 8) | 0);
184 put_le16(p
+ 84, (1 << 14) | 0);
186 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
187 if (blk_enable_write_cache(s
->blk
)) {
188 put_le16(p
+ 85, (1 << 14) | (1 << 5) | 1);
190 put_le16(p
+ 85, (1 << 14) | 1);
192 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
193 put_le16(p
+ 86, (1 << 13) | (1 <<12) | (1 << 10));
194 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
196 put_le16(p
+ 87, (1 << 14) | (1 << 8) | 0);
198 put_le16(p
+ 87, (1 << 14) | 0);
200 put_le16(p
+ 88, 0x3f | (1 << 13)); /* udma5 set and supported */
201 put_le16(p
+ 93, 1 | (1 << 14) | 0x2000);
202 /* *(p + 100) := nb_sectors -- see ide_identify_size */
203 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
204 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
205 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
207 if (dev
&& dev
->conf
.physical_block_size
)
208 put_le16(p
+ 106, 0x6000 | get_physical_block_exp(&dev
->conf
));
210 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
211 put_le16(p
+ 108, s
->wwn
>> 48);
212 put_le16(p
+ 109, s
->wwn
>> 32);
213 put_le16(p
+ 110, s
->wwn
>> 16);
214 put_le16(p
+ 111, s
->wwn
);
216 if (dev
&& dev
->conf
.discard_granularity
) {
217 put_le16(p
+ 169, 1); /* TRIM support */
220 put_le16(p
+ 217, dev
->rotation_rate
); /* Nominal media rotation rate */
223 ide_identify_size(s
);
227 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
230 static void ide_atapi_identify(IDEState
*s
)
234 p
= (uint16_t *)s
->identify_data
;
235 if (s
->identify_set
) {
238 memset(p
, 0, sizeof(s
->identify_data
));
240 /* Removable CDROM, 50us response, 12 byte packets */
241 put_le16(p
+ 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
242 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
243 put_le16(p
+ 20, 3); /* buffer type */
244 put_le16(p
+ 21, 512); /* cache size in sectors */
245 put_le16(p
+ 22, 4); /* ecc bytes */
246 padstr((char *)(p
+ 23), s
->version
, 8); /* firmware version */
247 padstr((char *)(p
+ 27), s
->drive_model_str
, 40); /* model */
248 put_le16(p
+ 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
250 put_le16(p
+ 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
251 put_le16(p
+ 53, 7); /* words 64-70, 54-58, 88 valid */
252 put_le16(p
+ 62, 7); /* single word dma0-2 supported */
253 put_le16(p
+ 63, 7); /* mdma0-2 supported */
255 put_le16(p
+ 49, 1 << 9); /* LBA supported, no DMA */
256 put_le16(p
+ 53, 3); /* words 64-70, 54-58 valid */
257 put_le16(p
+ 63, 0x103); /* DMA modes XXX: may be incorrect */
259 put_le16(p
+ 64, 3); /* pio3-4 supported */
260 put_le16(p
+ 65, 0xb4); /* minimum DMA multiword tx cycle time */
261 put_le16(p
+ 66, 0xb4); /* recommended DMA multiword tx cycle time */
262 put_le16(p
+ 67, 0x12c); /* minimum PIO cycle time without flow control */
263 put_le16(p
+ 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
265 put_le16(p
+ 71, 30); /* in ns */
266 put_le16(p
+ 72, 30); /* in ns */
269 put_le16(p
+ 75, s
->ncq_queues
- 1);
271 put_le16(p
+ 76, (1 << 8));
274 put_le16(p
+ 80, 0x1e); /* support up to ATA/ATAPI-4 */
276 put_le16(p
+ 84, (1 << 8)); /* supports WWN for words 108-111 */
277 put_le16(p
+ 87, (1 << 8)); /* WWN enabled */
281 put_le16(p
+ 88, 0x3f | (1 << 13)); /* udma5 set and supported */
285 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
286 put_le16(p
+ 108, s
->wwn
>> 48);
287 put_le16(p
+ 109, s
->wwn
>> 32);
288 put_le16(p
+ 110, s
->wwn
>> 16);
289 put_le16(p
+ 111, s
->wwn
);
295 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
298 static void ide_cfata_identify_size(IDEState
*s
)
300 uint16_t *p
= (uint16_t *)s
->identify_data
;
301 put_le16(p
+ 7, s
->nb_sectors
>> 16); /* Sectors per card */
302 put_le16(p
+ 8, s
->nb_sectors
); /* Sectors per card */
303 put_le16(p
+ 60, s
->nb_sectors
); /* Total LBA sectors */
304 put_le16(p
+ 61, s
->nb_sectors
>> 16); /* Total LBA sectors */
307 static void ide_cfata_identify(IDEState
*s
)
312 p
= (uint16_t *)s
->identify_data
;
313 if (s
->identify_set
) {
316 memset(p
, 0, sizeof(s
->identify_data
));
318 cur_sec
= s
->cylinders
* s
->heads
* s
->sectors
;
320 put_le16(p
+ 0, 0x848a); /* CF Storage Card signature */
321 put_le16(p
+ 1, s
->cylinders
); /* Default cylinders */
322 put_le16(p
+ 3, s
->heads
); /* Default heads */
323 put_le16(p
+ 6, s
->sectors
); /* Default sectors per track */
324 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
325 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
326 padstr((char *)(p
+ 10), s
->drive_serial_str
, 20); /* serial number */
327 put_le16(p
+ 22, 0x0004); /* ECC bytes */
328 padstr((char *) (p
+ 23), s
->version
, 8); /* Firmware Revision */
329 padstr((char *) (p
+ 27), s
->drive_model_str
, 40);/* Model number */
330 #if MAX_MULT_SECTORS > 1
331 put_le16(p
+ 47, 0x8000 | MAX_MULT_SECTORS
);
333 put_le16(p
+ 47, 0x0000);
335 put_le16(p
+ 49, 0x0f00); /* Capabilities */
336 put_le16(p
+ 51, 0x0002); /* PIO cycle timing mode */
337 put_le16(p
+ 52, 0x0001); /* DMA cycle timing mode */
338 put_le16(p
+ 53, 0x0003); /* Translation params valid */
339 put_le16(p
+ 54, s
->cylinders
); /* Current cylinders */
340 put_le16(p
+ 55, s
->heads
); /* Current heads */
341 put_le16(p
+ 56, s
->sectors
); /* Current sectors */
342 put_le16(p
+ 57, cur_sec
); /* Current capacity */
343 put_le16(p
+ 58, cur_sec
>> 16); /* Current capacity */
344 if (s
->mult_sectors
) /* Multiple sector setting */
345 put_le16(p
+ 59, 0x100 | s
->mult_sectors
);
346 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
347 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
348 put_le16(p
+ 63, 0x0203); /* Multiword DMA capability */
349 put_le16(p
+ 64, 0x0001); /* Flow Control PIO support */
350 put_le16(p
+ 65, 0x0096); /* Min. Multiword DMA cycle */
351 put_le16(p
+ 66, 0x0096); /* Rec. Multiword DMA cycle */
352 put_le16(p
+ 68, 0x00b4); /* Min. PIO cycle time */
353 put_le16(p
+ 82, 0x400c); /* Command Set supported */
354 put_le16(p
+ 83, 0x7068); /* Command Set supported */
355 put_le16(p
+ 84, 0x4000); /* Features supported */
356 put_le16(p
+ 85, 0x000c); /* Command Set enabled */
357 put_le16(p
+ 86, 0x7044); /* Command Set enabled */
358 put_le16(p
+ 87, 0x4000); /* Features enabled */
359 put_le16(p
+ 91, 0x4060); /* Current APM level */
360 put_le16(p
+ 129, 0x0002); /* Current features option */
361 put_le16(p
+ 130, 0x0005); /* Reassigned sectors */
362 put_le16(p
+ 131, 0x0001); /* Initial power mode */
363 put_le16(p
+ 132, 0x0000); /* User signature */
364 put_le16(p
+ 160, 0x8100); /* Power requirement */
365 put_le16(p
+ 161, 0x8001); /* CF command set */
367 ide_cfata_identify_size(s
);
371 memcpy(s
->io_buffer
, p
, sizeof(s
->identify_data
));
374 static void ide_set_signature(IDEState
*s
)
376 s
->select
&= ~(ATA_DEV_HS
); /* clear head */
380 if (s
->drive_kind
== IDE_CD
) {
392 static bool ide_sect_range_ok(IDEState
*s
,
393 uint64_t sector
, uint64_t nb_sectors
)
395 uint64_t total_sectors
;
397 blk_get_geometry(s
->blk
, &total_sectors
);
398 if (sector
> total_sectors
|| nb_sectors
> total_sectors
- sector
) {
404 typedef struct TrimAIOCB
{
414 static void trim_aio_cancel(BlockAIOCB
*acb
)
416 TrimAIOCB
*iocb
= container_of(acb
, TrimAIOCB
, common
);
418 /* Exit the loop so ide_issue_trim_cb will not continue */
419 iocb
->j
= iocb
->qiov
->niov
- 1;
420 iocb
->i
= (iocb
->qiov
->iov
[iocb
->j
].iov_len
/ 8) - 1;
422 iocb
->ret
= -ECANCELED
;
425 blk_aio_cancel_async(iocb
->aiocb
);
430 static const AIOCBInfo trim_aiocb_info
= {
431 .aiocb_size
= sizeof(TrimAIOCB
),
432 .cancel_async
= trim_aio_cancel
,
435 static void ide_trim_bh_cb(void *opaque
)
437 TrimAIOCB
*iocb
= opaque
;
439 iocb
->common
.cb(iocb
->common
.opaque
, iocb
->ret
);
441 qemu_bh_delete(iocb
->bh
);
443 qemu_aio_unref(iocb
);
446 static void ide_issue_trim_cb(void *opaque
, int ret
)
448 TrimAIOCB
*iocb
= opaque
;
449 IDEState
*s
= iocb
->s
;
453 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
455 block_acct_failed(blk_get_stats(s
->blk
), &s
->acct
);
460 while (iocb
->j
< iocb
->qiov
->niov
) {
462 while (++iocb
->i
< iocb
->qiov
->iov
[j
].iov_len
/ 8) {
464 uint64_t *buffer
= iocb
->qiov
->iov
[j
].iov_base
;
466 /* 6-byte LBA + 2-byte range per entry */
467 uint64_t entry
= le64_to_cpu(buffer
[i
]);
468 uint64_t sector
= entry
& 0x0000ffffffffffffULL
;
469 uint16_t count
= entry
>> 48;
475 if (!ide_sect_range_ok(s
, sector
, count
)) {
476 block_acct_invalid(blk_get_stats(s
->blk
), BLOCK_ACCT_UNMAP
);
481 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
482 count
<< BDRV_SECTOR_BITS
, BLOCK_ACCT_UNMAP
);
484 /* Got an entry! Submit and exit. */
485 iocb
->aiocb
= blk_aio_pdiscard(s
->blk
,
486 sector
<< BDRV_SECTOR_BITS
,
487 count
<< BDRV_SECTOR_BITS
,
488 ide_issue_trim_cb
, opaque
);
502 replay_bh_schedule_event(iocb
->bh
);
506 BlockAIOCB
*ide_issue_trim(
507 int64_t offset
, QEMUIOVector
*qiov
,
508 BlockCompletionFunc
*cb
, void *cb_opaque
, void *opaque
)
510 IDEState
*s
= opaque
;
513 iocb
= blk_aio_get(&trim_aiocb_info
, s
->blk
, cb
, cb_opaque
);
515 iocb
->bh
= qemu_bh_new(ide_trim_bh_cb
, iocb
);
520 ide_issue_trim_cb(iocb
, 0);
521 return &iocb
->common
;
524 void ide_abort_command(IDEState
*s
)
526 ide_transfer_stop(s
);
527 s
->status
= READY_STAT
| ERR_STAT
;
531 static void ide_set_retry(IDEState
*s
)
533 s
->bus
->retry_unit
= s
->unit
;
534 s
->bus
->retry_sector_num
= ide_get_sector(s
);
535 s
->bus
->retry_nsector
= s
->nsector
;
538 static void ide_clear_retry(IDEState
*s
)
540 s
->bus
->retry_unit
= -1;
541 s
->bus
->retry_sector_num
= 0;
542 s
->bus
->retry_nsector
= 0;
545 /* prepare data transfer and tell what to do after */
546 bool ide_transfer_start_norecurse(IDEState
*s
, uint8_t *buf
, int size
,
547 EndTransferFunc
*end_transfer_func
)
550 s
->data_end
= buf
+ size
;
552 if (!(s
->status
& ERR_STAT
)) {
553 s
->status
|= DRQ_STAT
;
555 if (!s
->bus
->dma
->ops
->pio_transfer
) {
556 s
->end_transfer_func
= end_transfer_func
;
559 s
->bus
->dma
->ops
->pio_transfer(s
->bus
->dma
);
563 void ide_transfer_start(IDEState
*s
, uint8_t *buf
, int size
,
564 EndTransferFunc
*end_transfer_func
)
566 if (ide_transfer_start_norecurse(s
, buf
, size
, end_transfer_func
)) {
567 end_transfer_func(s
);
571 static void ide_cmd_done(IDEState
*s
)
573 if (s
->bus
->dma
->ops
->cmd_done
) {
574 s
->bus
->dma
->ops
->cmd_done(s
->bus
->dma
);
578 static void ide_transfer_halt(IDEState
*s
)
580 s
->end_transfer_func
= ide_transfer_stop
;
581 s
->data_ptr
= s
->io_buffer
;
582 s
->data_end
= s
->io_buffer
;
583 s
->status
&= ~DRQ_STAT
;
586 void ide_transfer_stop(IDEState
*s
)
588 ide_transfer_halt(s
);
592 int64_t ide_get_sector(IDEState
*s
)
595 if (s
->select
& (ATA_DEV_LBA
)) {
597 sector_num
= ((int64_t)s
->hob_hcyl
<< 40) |
598 ((int64_t) s
->hob_lcyl
<< 32) |
599 ((int64_t) s
->hob_sector
<< 24) |
600 ((int64_t) s
->hcyl
<< 16) |
601 ((int64_t) s
->lcyl
<< 8) | s
->sector
;
604 sector_num
= ((s
->select
& (ATA_DEV_LBA_MSB
)) << 24) |
605 (s
->hcyl
<< 16) | (s
->lcyl
<< 8) | s
->sector
;
609 sector_num
= ((s
->hcyl
<< 8) | s
->lcyl
) * s
->heads
* s
->sectors
+
610 (s
->select
& (ATA_DEV_HS
)) * s
->sectors
+ (s
->sector
- 1);
616 void ide_set_sector(IDEState
*s
, int64_t sector_num
)
619 if (s
->select
& (ATA_DEV_LBA
)) {
621 s
->sector
= sector_num
;
622 s
->lcyl
= sector_num
>> 8;
623 s
->hcyl
= sector_num
>> 16;
624 s
->hob_sector
= sector_num
>> 24;
625 s
->hob_lcyl
= sector_num
>> 32;
626 s
->hob_hcyl
= sector_num
>> 40;
629 s
->select
= (s
->select
& ~(ATA_DEV_LBA_MSB
)) |
630 ((sector_num
>> 24) & (ATA_DEV_LBA_MSB
));
631 s
->hcyl
= (sector_num
>> 16);
632 s
->lcyl
= (sector_num
>> 8);
633 s
->sector
= (sector_num
);
637 cyl
= sector_num
/ (s
->heads
* s
->sectors
);
638 r
= sector_num
% (s
->heads
* s
->sectors
);
641 s
->select
= (s
->select
& ~(ATA_DEV_HS
)) |
642 ((r
/ s
->sectors
) & (ATA_DEV_HS
));
643 s
->sector
= (r
% s
->sectors
) + 1;
647 static void ide_rw_error(IDEState
*s
) {
648 ide_abort_command(s
);
652 static void ide_buffered_readv_cb(void *opaque
, int ret
)
654 IDEBufferedRequest
*req
= opaque
;
655 if (!req
->orphaned
) {
657 assert(req
->qiov
.size
== req
->original_qiov
->size
);
658 qemu_iovec_from_buf(req
->original_qiov
, 0,
659 req
->qiov
.local_iov
.iov_base
,
660 req
->original_qiov
->size
);
662 req
->original_cb(req
->original_opaque
, ret
);
664 QLIST_REMOVE(req
, list
);
665 qemu_vfree(qemu_iovec_buf(&req
->qiov
));
669 #define MAX_BUFFERED_REQS 16
671 BlockAIOCB
*ide_buffered_readv(IDEState
*s
, int64_t sector_num
,
672 QEMUIOVector
*iov
, int nb_sectors
,
673 BlockCompletionFunc
*cb
, void *opaque
)
676 IDEBufferedRequest
*req
;
679 QLIST_FOREACH(req
, &s
->buffered_requests
, list
) {
682 if (c
> MAX_BUFFERED_REQS
) {
683 return blk_abort_aio_request(s
->blk
, cb
, opaque
, -EIO
);
686 req
= g_new0(IDEBufferedRequest
, 1);
687 req
->original_qiov
= iov
;
688 req
->original_cb
= cb
;
689 req
->original_opaque
= opaque
;
690 qemu_iovec_init_buf(&req
->qiov
, blk_blockalign(s
->blk
, iov
->size
),
693 aioreq
= blk_aio_preadv(s
->blk
, sector_num
<< BDRV_SECTOR_BITS
,
694 &req
->qiov
, 0, ide_buffered_readv_cb
, req
);
696 QLIST_INSERT_HEAD(&s
->buffered_requests
, req
, list
);
701 * Cancel all pending DMA requests.
702 * Any buffered DMA requests are instantly canceled,
703 * but any pending unbuffered DMA requests must be waited on.
705 void ide_cancel_dma_sync(IDEState
*s
)
707 IDEBufferedRequest
*req
;
709 /* First invoke the callbacks of all buffered requests
710 * and flag those requests as orphaned. Ideally there
711 * are no unbuffered (Scatter Gather DMA Requests or
712 * write requests) pending and we can avoid to drain. */
713 QLIST_FOREACH(req
, &s
->buffered_requests
, list
) {
714 if (!req
->orphaned
) {
715 trace_ide_cancel_dma_sync_buffered(req
->original_cb
, req
);
716 req
->original_cb(req
->original_opaque
, -ECANCELED
);
718 req
->orphaned
= true;
722 * We can't cancel Scatter Gather DMA in the middle of the
723 * operation or a partial (not full) DMA transfer would reach
724 * the storage so we wait for completion instead (we behave
725 * like if the DMA was completed by the time the guest trying
726 * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
729 * In the future we'll be able to safely cancel the I/O if the
730 * whole DMA operation will be submitted to disk with a single
731 * aio operation with preadv/pwritev.
733 if (s
->bus
->dma
->aiocb
) {
734 trace_ide_cancel_dma_sync_remaining();
736 assert(s
->bus
->dma
->aiocb
== NULL
);
740 static void ide_sector_read(IDEState
*s
);
742 static void ide_sector_read_cb(void *opaque
, int ret
)
744 IDEState
*s
= opaque
;
748 s
->status
&= ~BUSY_STAT
;
751 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_PIO
|
757 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
760 if (n
> s
->req_nb_sectors
) {
761 n
= s
->req_nb_sectors
;
764 ide_set_sector(s
, ide_get_sector(s
) + n
);
766 /* Allow the guest to read the io_buffer */
767 ide_transfer_start(s
, s
->io_buffer
, n
* BDRV_SECTOR_SIZE
, ide_sector_read
);
771 static void ide_sector_read(IDEState
*s
)
776 s
->status
= READY_STAT
| SEEK_STAT
;
777 s
->error
= 0; /* not needed by IDE spec, but needed by Windows */
778 sector_num
= ide_get_sector(s
);
782 ide_transfer_stop(s
);
786 s
->status
|= BUSY_STAT
;
788 if (n
> s
->req_nb_sectors
) {
789 n
= s
->req_nb_sectors
;
792 trace_ide_sector_read(sector_num
, n
);
794 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
796 block_acct_invalid(blk_get_stats(s
->blk
), BLOCK_ACCT_READ
);
800 qemu_iovec_init_buf(&s
->qiov
, s
->io_buffer
, n
* BDRV_SECTOR_SIZE
);
802 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
803 n
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
804 s
->pio_aiocb
= ide_buffered_readv(s
, sector_num
, &s
->qiov
, n
,
805 ide_sector_read_cb
, s
);
808 void dma_buf_commit(IDEState
*s
, uint32_t tx_bytes
)
810 if (s
->bus
->dma
->ops
->commit_buf
) {
811 s
->bus
->dma
->ops
->commit_buf(s
->bus
->dma
, tx_bytes
);
813 s
->io_buffer_offset
+= tx_bytes
;
814 qemu_sglist_destroy(&s
->sg
);
817 void ide_set_inactive(IDEState
*s
, bool more
)
819 s
->bus
->dma
->aiocb
= NULL
;
821 if (s
->bus
->dma
->ops
->set_inactive
) {
822 s
->bus
->dma
->ops
->set_inactive(s
->bus
->dma
, more
);
827 void ide_dma_error(IDEState
*s
)
829 dma_buf_commit(s
, 0);
830 ide_abort_command(s
);
831 ide_set_inactive(s
, false);
835 int ide_handle_rw_error(IDEState
*s
, int error
, int op
)
837 bool is_read
= (op
& IDE_RETRY_READ
) != 0;
838 BlockErrorAction action
= blk_get_error_action(s
->blk
, is_read
, error
);
840 if (action
== BLOCK_ERROR_ACTION_STOP
) {
841 assert(s
->bus
->retry_unit
== s
->unit
);
842 s
->bus
->error_status
= op
;
843 } else if (action
== BLOCK_ERROR_ACTION_REPORT
) {
844 block_acct_failed(blk_get_stats(s
->blk
), &s
->acct
);
845 if (IS_IDE_RETRY_DMA(op
)) {
847 } else if (IS_IDE_RETRY_ATAPI(op
)) {
848 ide_atapi_io_error(s
, -error
);
853 blk_error_action(s
->blk
, action
, is_read
, error
);
854 return action
!= BLOCK_ERROR_ACTION_IGNORE
;
857 static void ide_dma_cb(void *opaque
, int ret
)
859 IDEState
*s
= opaque
;
863 bool stay_active
= false;
864 int32_t prep_size
= 0;
866 if (ret
== -EINVAL
) {
872 if (ide_handle_rw_error(s
, -ret
, ide_dma_cmd_to_retry(s
->dma_cmd
))) {
873 s
->bus
->dma
->aiocb
= NULL
;
874 dma_buf_commit(s
, 0);
879 if (s
->io_buffer_size
> s
->nsector
* 512) {
881 * The PRDs were longer than needed for this request.
882 * The Active bit must remain set after the request completes.
887 n
= s
->io_buffer_size
>> 9;
890 sector_num
= ide_get_sector(s
);
892 assert(n
* 512 == s
->sg
.size
);
893 dma_buf_commit(s
, s
->sg
.size
);
895 ide_set_sector(s
, sector_num
);
899 /* end of transfer ? */
900 if (s
->nsector
== 0) {
901 s
->status
= READY_STAT
| SEEK_STAT
;
906 /* launch next transfer */
908 s
->io_buffer_index
= 0;
909 s
->io_buffer_size
= n
* 512;
910 prep_size
= s
->bus
->dma
->ops
->prepare_buf(s
->bus
->dma
, s
->io_buffer_size
);
911 /* prepare_buf() must succeed and respect the limit */
912 assert(prep_size
>= 0 && prep_size
<= n
* 512);
915 * Now prep_size stores the number of bytes in the sglist, and
916 * s->io_buffer_size stores the number of bytes described by the PRDs.
919 if (prep_size
< n
* 512) {
921 * The PRDs are too short for this request. Error condition!
922 * Reset the Active bit and don't raise the interrupt.
924 s
->status
= READY_STAT
| SEEK_STAT
;
925 dma_buf_commit(s
, 0);
929 trace_ide_dma_cb(s
, sector_num
, n
, IDE_DMA_CMD_str(s
->dma_cmd
));
931 if ((s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) &&
932 !ide_sect_range_ok(s
, sector_num
, n
)) {
934 block_acct_invalid(blk_get_stats(s
->blk
), s
->acct
.type
);
938 offset
= sector_num
<< BDRV_SECTOR_BITS
;
939 switch (s
->dma_cmd
) {
941 s
->bus
->dma
->aiocb
= dma_blk_read(s
->blk
, &s
->sg
, offset
,
942 BDRV_SECTOR_SIZE
, ide_dma_cb
, s
);
945 s
->bus
->dma
->aiocb
= dma_blk_write(s
->blk
, &s
->sg
, offset
,
946 BDRV_SECTOR_SIZE
, ide_dma_cb
, s
);
949 s
->bus
->dma
->aiocb
= dma_blk_io(blk_get_aio_context(s
->blk
),
950 &s
->sg
, offset
, BDRV_SECTOR_SIZE
,
951 ide_issue_trim
, s
, ide_dma_cb
, s
,
952 DMA_DIRECTION_TO_DEVICE
);
960 if (s
->dma_cmd
== IDE_DMA_READ
|| s
->dma_cmd
== IDE_DMA_WRITE
) {
961 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
963 ide_set_inactive(s
, stay_active
);
966 static void ide_sector_start_dma(IDEState
*s
, enum ide_dma_cmd dma_cmd
)
968 s
->status
= READY_STAT
| SEEK_STAT
| DRQ_STAT
;
969 s
->io_buffer_size
= 0;
970 s
->dma_cmd
= dma_cmd
;
974 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
975 s
->nsector
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_READ
);
978 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
979 s
->nsector
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_WRITE
);
985 ide_start_dma(s
, ide_dma_cb
);
988 void ide_start_dma(IDEState
*s
, BlockCompletionFunc
*cb
)
990 s
->io_buffer_index
= 0;
992 if (s
->bus
->dma
->ops
->start_dma
) {
993 s
->bus
->dma
->ops
->start_dma(s
->bus
->dma
, s
, cb
);
997 static void ide_sector_write(IDEState
*s
);
999 static void ide_sector_write_timer_cb(void *opaque
)
1001 IDEState
*s
= opaque
;
1002 ide_set_irq(s
->bus
);
1005 static void ide_sector_write_cb(void *opaque
, int ret
)
1007 IDEState
*s
= opaque
;
1010 s
->pio_aiocb
= NULL
;
1011 s
->status
&= ~BUSY_STAT
;
1014 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_PIO
)) {
1019 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
1022 if (n
> s
->req_nb_sectors
) {
1023 n
= s
->req_nb_sectors
;
1027 ide_set_sector(s
, ide_get_sector(s
) + n
);
1028 if (s
->nsector
== 0) {
1029 /* no more sectors to write */
1030 ide_transfer_stop(s
);
1032 int n1
= s
->nsector
;
1033 if (n1
> s
->req_nb_sectors
) {
1034 n1
= s
->req_nb_sectors
;
1036 ide_transfer_start(s
, s
->io_buffer
, n1
* BDRV_SECTOR_SIZE
,
1040 if (win2k_install_hack
&& ((++s
->irq_count
% 16) == 0)) {
1041 /* It seems there is a bug in the Windows 2000 installer HDD
1042 IDE driver which fills the disk with empty logs when the
1043 IDE write IRQ comes too early. This hack tries to correct
1044 that at the expense of slower write performances. Use this
1045 option _only_ to install Windows 2000. You must disable it
1047 timer_mod(s
->sector_write_timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) +
1048 (NANOSECONDS_PER_SECOND
/ 1000));
1050 ide_set_irq(s
->bus
);
1054 static void ide_sector_write(IDEState
*s
)
1059 s
->status
= READY_STAT
| SEEK_STAT
| BUSY_STAT
;
1060 sector_num
= ide_get_sector(s
);
1063 if (n
> s
->req_nb_sectors
) {
1064 n
= s
->req_nb_sectors
;
1067 trace_ide_sector_write(sector_num
, n
);
1069 if (!ide_sect_range_ok(s
, sector_num
, n
)) {
1071 block_acct_invalid(blk_get_stats(s
->blk
), BLOCK_ACCT_WRITE
);
1075 qemu_iovec_init_buf(&s
->qiov
, s
->io_buffer
, n
* BDRV_SECTOR_SIZE
);
1077 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
,
1078 n
* BDRV_SECTOR_SIZE
, BLOCK_ACCT_WRITE
);
1079 s
->pio_aiocb
= blk_aio_pwritev(s
->blk
, sector_num
<< BDRV_SECTOR_BITS
,
1080 &s
->qiov
, 0, ide_sector_write_cb
, s
);
1083 static void ide_flush_cb(void *opaque
, int ret
)
1085 IDEState
*s
= opaque
;
1087 s
->pio_aiocb
= NULL
;
1090 /* XXX: What sector number to set here? */
1091 if (ide_handle_rw_error(s
, -ret
, IDE_RETRY_FLUSH
)) {
1097 block_acct_done(blk_get_stats(s
->blk
), &s
->acct
);
1099 s
->status
= READY_STAT
| SEEK_STAT
;
1101 ide_set_irq(s
->bus
);
1104 static void ide_flush_cache(IDEState
*s
)
1106 if (s
->blk
== NULL
) {
1111 s
->status
|= BUSY_STAT
;
1113 block_acct_start(blk_get_stats(s
->blk
), &s
->acct
, 0, BLOCK_ACCT_FLUSH
);
1114 s
->pio_aiocb
= blk_aio_flush(s
->blk
, ide_flush_cb
, s
);
1117 static void ide_cfata_metadata_inquiry(IDEState
*s
)
1122 p
= (uint16_t *) s
->io_buffer
;
1123 memset(p
, 0, 0x200);
1124 spd
= ((s
->mdata_size
- 1) >> 9) + 1;
1126 put_le16(p
+ 0, 0x0001); /* Data format revision */
1127 put_le16(p
+ 1, 0x0000); /* Media property: silicon */
1128 put_le16(p
+ 2, s
->media_changed
); /* Media status */
1129 put_le16(p
+ 3, s
->mdata_size
& 0xffff); /* Capacity in bytes (low) */
1130 put_le16(p
+ 4, s
->mdata_size
>> 16); /* Capacity in bytes (high) */
1131 put_le16(p
+ 5, spd
& 0xffff); /* Sectors per device (low) */
1132 put_le16(p
+ 6, spd
>> 16); /* Sectors per device (high) */
1135 static void ide_cfata_metadata_read(IDEState
*s
)
1139 if (((s
->hcyl
<< 16) | s
->lcyl
) << 9 > s
->mdata_size
+ 2) {
1140 s
->status
= ERR_STAT
;
1141 s
->error
= ABRT_ERR
;
1145 p
= (uint16_t *) s
->io_buffer
;
1146 memset(p
, 0, 0x200);
1148 put_le16(p
+ 0, s
->media_changed
); /* Media status */
1149 memcpy(p
+ 1, s
->mdata_storage
+ (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1150 MIN(MIN(s
->mdata_size
- (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1151 s
->nsector
<< 9), 0x200 - 2));
1154 static void ide_cfata_metadata_write(IDEState
*s
)
1156 if (((s
->hcyl
<< 16) | s
->lcyl
) << 9 > s
->mdata_size
+ 2) {
1157 s
->status
= ERR_STAT
;
1158 s
->error
= ABRT_ERR
;
1162 s
->media_changed
= 0;
1164 memcpy(s
->mdata_storage
+ (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1166 MIN(MIN(s
->mdata_size
- (((s
->hcyl
<< 16) | s
->lcyl
) << 9),
1167 s
->nsector
<< 9), 0x200 - 2));
1170 /* called when the inserted state of the media has changed */
1171 static void ide_cd_change_cb(void *opaque
, bool load
, Error
**errp
)
1173 IDEState
*s
= opaque
;
1174 uint64_t nb_sectors
;
1176 s
->tray_open
= !load
;
1177 blk_get_geometry(s
->blk
, &nb_sectors
);
1178 s
->nb_sectors
= nb_sectors
;
1181 * First indicate to the guest that a CD has been removed. That's
1182 * done on the next command the guest sends us.
1184 * Then we set UNIT_ATTENTION, by which the guest will
1185 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1187 s
->cdrom_changed
= 1;
1188 s
->events
.new_media
= true;
1189 s
->events
.eject_request
= false;
1190 ide_set_irq(s
->bus
);
1193 static void ide_cd_eject_request_cb(void *opaque
, bool force
)
1195 IDEState
*s
= opaque
;
1197 s
->events
.eject_request
= true;
1199 s
->tray_locked
= false;
1201 ide_set_irq(s
->bus
);
1204 static void ide_cmd_lba48_transform(IDEState
*s
, int lba48
)
1208 /* handle the 'magic' 0 nsector count conversion here. to avoid
1209 * fiddling with the rest of the read logic, we just store the
1210 * full sector count in ->nsector and ignore ->hob_nsector from now
1216 if (!s
->nsector
&& !s
->hob_nsector
)
1219 int lo
= s
->nsector
;
1220 int hi
= s
->hob_nsector
;
1222 s
->nsector
= (hi
<< 8) | lo
;
1227 static void ide_clear_hob(IDEBus
*bus
)
1229 /* any write clears HOB high bit of device control register */
1230 bus
->cmd
&= ~(IDE_CTRL_HOB
);
/* IOport [W]rite [R]egisters */
enum ATA_IOPORT_WR {
    ATA_IOPORT_WR_DATA = 0,
    ATA_IOPORT_WR_FEATURES = 1,
    ATA_IOPORT_WR_SECTOR_COUNT = 2,
    ATA_IOPORT_WR_SECTOR_NUMBER = 3,
    ATA_IOPORT_WR_CYLINDER_LOW = 4,
    ATA_IOPORT_WR_CYLINDER_HIGH = 5,
    ATA_IOPORT_WR_DEVICE_HEAD = 6,
    ATA_IOPORT_WR_COMMAND = 7,
    ATA_IOPORT_WR_NUM_REGISTERS,
};

/* Human-readable register names, used by the trace points below. */
const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
    [ATA_IOPORT_WR_DATA] = "Data",
    [ATA_IOPORT_WR_FEATURES] = "Features",
    [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_WR_COMMAND] = "Command"
};
1257 void ide_ioport_write(void *opaque
, uint32_t addr
, uint32_t val
)
1259 IDEBus
*bus
= opaque
;
1260 IDEState
*s
= idebus_active_if(bus
);
1261 int reg_num
= addr
& 7;
1263 trace_ide_ioport_write(addr
, ATA_IOPORT_WR_lookup
[reg_num
], val
, bus
, s
);
1265 /* ignore writes to command block while busy with previous command */
1266 if (reg_num
!= 7 && (s
->status
& (BUSY_STAT
|DRQ_STAT
))) {
1270 /* NOTE: Device0 and Device1 both receive incoming register writes.
1271 * (They're on the same bus! They have to!) */
1276 case ATA_IOPORT_WR_FEATURES
:
1278 bus
->ifs
[0].hob_feature
= bus
->ifs
[0].feature
;
1279 bus
->ifs
[1].hob_feature
= bus
->ifs
[1].feature
;
1280 bus
->ifs
[0].feature
= val
;
1281 bus
->ifs
[1].feature
= val
;
1283 case ATA_IOPORT_WR_SECTOR_COUNT
:
1285 bus
->ifs
[0].hob_nsector
= bus
->ifs
[0].nsector
;
1286 bus
->ifs
[1].hob_nsector
= bus
->ifs
[1].nsector
;
1287 bus
->ifs
[0].nsector
= val
;
1288 bus
->ifs
[1].nsector
= val
;
1290 case ATA_IOPORT_WR_SECTOR_NUMBER
:
1292 bus
->ifs
[0].hob_sector
= bus
->ifs
[0].sector
;
1293 bus
->ifs
[1].hob_sector
= bus
->ifs
[1].sector
;
1294 bus
->ifs
[0].sector
= val
;
1295 bus
->ifs
[1].sector
= val
;
1297 case ATA_IOPORT_WR_CYLINDER_LOW
:
1299 bus
->ifs
[0].hob_lcyl
= bus
->ifs
[0].lcyl
;
1300 bus
->ifs
[1].hob_lcyl
= bus
->ifs
[1].lcyl
;
1301 bus
->ifs
[0].lcyl
= val
;
1302 bus
->ifs
[1].lcyl
= val
;
1304 case ATA_IOPORT_WR_CYLINDER_HIGH
:
1306 bus
->ifs
[0].hob_hcyl
= bus
->ifs
[0].hcyl
;
1307 bus
->ifs
[1].hob_hcyl
= bus
->ifs
[1].hcyl
;
1308 bus
->ifs
[0].hcyl
= val
;
1309 bus
->ifs
[1].hcyl
= val
;
1311 case ATA_IOPORT_WR_DEVICE_HEAD
:
1313 bus
->ifs
[0].select
= val
| (ATA_DEV_ALWAYS_ON
);
1314 bus
->ifs
[1].select
= val
| (ATA_DEV_ALWAYS_ON
);
1316 bus
->unit
= (val
& (ATA_DEV_SELECT
)) ? 1 : 0;
1319 case ATA_IOPORT_WR_COMMAND
:
1321 qemu_irq_lower(bus
->irq
);
1322 ide_exec_cmd(bus
, val
);
1327 static void ide_reset(IDEState
*s
)
1332 blk_aio_cancel(s
->pio_aiocb
);
1333 s
->pio_aiocb
= NULL
;
1336 if (s
->drive_kind
== IDE_CFATA
)
1337 s
->mult_sectors
= 0;
1339 s
->mult_sectors
= MAX_MULT_SECTORS
;
1355 s
->select
= (ATA_DEV_ALWAYS_ON
);
1356 s
->status
= READY_STAT
| SEEK_STAT
;
1360 /* ATAPI specific */
1363 s
->cdrom_changed
= 0;
1364 s
->packet_transfer_size
= 0;
1365 s
->elementary_transfer_size
= 0;
1366 s
->io_buffer_index
= 0;
1367 s
->cd_sector_size
= 0;
1372 s
->io_buffer_size
= 0;
1373 s
->req_nb_sectors
= 0;
1375 ide_set_signature(s
);
1376 /* init the transfer handler so that 0xffff is returned on data
1378 s
->end_transfer_func
= ide_dummy_transfer_stop
;
1379 ide_dummy_transfer_stop(s
);
1380 s
->media_changed
= 0;
1383 static bool cmd_nop(IDEState
*s
, uint8_t cmd
)
1388 static bool cmd_device_reset(IDEState
*s
, uint8_t cmd
)
1390 /* Halt PIO (in the DRQ phase), then DMA */
1391 ide_transfer_halt(s
);
1392 ide_cancel_dma_sync(s
);
1394 /* Reset any PIO commands, reset signature, etc */
1397 /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
1398 * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
1401 /* Do not overwrite status register */
1405 static bool cmd_data_set_management(IDEState
*s
, uint8_t cmd
)
1407 switch (s
->feature
) {
1410 ide_sector_start_dma(s
, IDE_DMA_TRIM
);
1416 ide_abort_command(s
);
1420 static bool cmd_identify(IDEState
*s
, uint8_t cmd
)
1422 if (s
->blk
&& s
->drive_kind
!= IDE_CD
) {
1423 if (s
->drive_kind
!= IDE_CFATA
) {
1426 ide_cfata_identify(s
);
1428 s
->status
= READY_STAT
| SEEK_STAT
;
1429 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1430 ide_set_irq(s
->bus
);
1433 if (s
->drive_kind
== IDE_CD
) {
1434 ide_set_signature(s
);
1436 ide_abort_command(s
);
1442 static bool cmd_verify(IDEState
*s
, uint8_t cmd
)
1444 bool lba48
= (cmd
== WIN_VERIFY_EXT
);
1446 /* do sector number check ? */
1447 ide_cmd_lba48_transform(s
, lba48
);
1452 static bool cmd_set_multiple_mode(IDEState
*s
, uint8_t cmd
)
1454 if (s
->drive_kind
== IDE_CFATA
&& s
->nsector
== 0) {
1455 /* Disable Read and Write Multiple */
1456 s
->mult_sectors
= 0;
1457 } else if ((s
->nsector
& 0xff) != 0 &&
1458 ((s
->nsector
& 0xff) > MAX_MULT_SECTORS
||
1459 (s
->nsector
& (s
->nsector
- 1)) != 0)) {
1460 ide_abort_command(s
);
1462 s
->mult_sectors
= s
->nsector
& 0xff;
1468 static bool cmd_read_multiple(IDEState
*s
, uint8_t cmd
)
1470 bool lba48
= (cmd
== WIN_MULTREAD_EXT
);
1472 if (!s
->blk
|| !s
->mult_sectors
) {
1473 ide_abort_command(s
);
1477 ide_cmd_lba48_transform(s
, lba48
);
1478 s
->req_nb_sectors
= s
->mult_sectors
;
1483 static bool cmd_write_multiple(IDEState
*s
, uint8_t cmd
)
1485 bool lba48
= (cmd
== WIN_MULTWRITE_EXT
);
1488 if (!s
->blk
|| !s
->mult_sectors
) {
1489 ide_abort_command(s
);
1493 ide_cmd_lba48_transform(s
, lba48
);
1495 s
->req_nb_sectors
= s
->mult_sectors
;
1496 n
= MIN(s
->nsector
, s
->req_nb_sectors
);
1498 s
->status
= SEEK_STAT
| READY_STAT
;
1499 ide_transfer_start(s
, s
->io_buffer
, 512 * n
, ide_sector_write
);
1501 s
->media_changed
= 1;
1506 static bool cmd_read_pio(IDEState
*s
, uint8_t cmd
)
1508 bool lba48
= (cmd
== WIN_READ_EXT
);
1510 if (s
->drive_kind
== IDE_CD
) {
1511 ide_set_signature(s
); /* odd, but ATA4 8.27.5.2 requires it */
1512 ide_abort_command(s
);
1517 ide_abort_command(s
);
1521 ide_cmd_lba48_transform(s
, lba48
);
1522 s
->req_nb_sectors
= 1;
1528 static bool cmd_write_pio(IDEState
*s
, uint8_t cmd
)
1530 bool lba48
= (cmd
== WIN_WRITE_EXT
);
1533 ide_abort_command(s
);
1537 ide_cmd_lba48_transform(s
, lba48
);
1539 s
->req_nb_sectors
= 1;
1540 s
->status
= SEEK_STAT
| READY_STAT
;
1541 ide_transfer_start(s
, s
->io_buffer
, 512, ide_sector_write
);
1543 s
->media_changed
= 1;
1548 static bool cmd_read_dma(IDEState
*s
, uint8_t cmd
)
1550 bool lba48
= (cmd
== WIN_READDMA_EXT
);
1553 ide_abort_command(s
);
1557 ide_cmd_lba48_transform(s
, lba48
);
1558 ide_sector_start_dma(s
, IDE_DMA_READ
);
1563 static bool cmd_write_dma(IDEState
*s
, uint8_t cmd
)
1565 bool lba48
= (cmd
== WIN_WRITEDMA_EXT
);
1568 ide_abort_command(s
);
1572 ide_cmd_lba48_transform(s
, lba48
);
1573 ide_sector_start_dma(s
, IDE_DMA_WRITE
);
1575 s
->media_changed
= 1;
1580 static bool cmd_flush_cache(IDEState
*s
, uint8_t cmd
)
1586 static bool cmd_seek(IDEState
*s
, uint8_t cmd
)
1588 /* XXX: Check that seek is within bounds */
1592 static bool cmd_read_native_max(IDEState
*s
, uint8_t cmd
)
1594 bool lba48
= (cmd
== WIN_READ_NATIVE_MAX_EXT
);
1596 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1597 if (s
->nb_sectors
== 0) {
1598 ide_abort_command(s
);
1602 ide_cmd_lba48_transform(s
, lba48
);
1603 ide_set_sector(s
, s
->nb_sectors
- 1);
1608 static bool cmd_check_power_mode(IDEState
*s
, uint8_t cmd
)
1610 s
->nsector
= 0xff; /* device active or idle */
1614 static bool cmd_set_features(IDEState
*s
, uint8_t cmd
)
1616 uint16_t *identify_data
;
1619 ide_abort_command(s
);
1623 /* XXX: valid for CDROM ? */
1624 switch (s
->feature
) {
1625 case 0x02: /* write cache enable */
1626 blk_set_enable_write_cache(s
->blk
, true);
1627 identify_data
= (uint16_t *)s
->identify_data
;
1628 put_le16(identify_data
+ 85, (1 << 14) | (1 << 5) | 1);
1630 case 0x82: /* write cache disable */
1631 blk_set_enable_write_cache(s
->blk
, false);
1632 identify_data
= (uint16_t *)s
->identify_data
;
1633 put_le16(identify_data
+ 85, (1 << 14) | 1);
1636 case 0xcc: /* reverting to power-on defaults enable */
1637 case 0x66: /* reverting to power-on defaults disable */
1638 case 0xaa: /* read look-ahead enable */
1639 case 0x55: /* read look-ahead disable */
1640 case 0x05: /* set advanced power management mode */
1641 case 0x85: /* disable advanced power management mode */
1642 case 0x69: /* NOP */
1643 case 0x67: /* NOP */
1644 case 0x96: /* NOP */
1645 case 0x9a: /* NOP */
1646 case 0x42: /* enable Automatic Acoustic Mode */
1647 case 0xc2: /* disable Automatic Acoustic Mode */
1649 case 0x03: /* set transfer mode */
1651 uint8_t val
= s
->nsector
& 0x07;
1652 identify_data
= (uint16_t *)s
->identify_data
;
1654 switch (s
->nsector
>> 3) {
1655 case 0x00: /* pio default */
1656 case 0x01: /* pio mode */
1657 put_le16(identify_data
+ 62, 0x07);
1658 put_le16(identify_data
+ 63, 0x07);
1659 put_le16(identify_data
+ 88, 0x3f);
1661 case 0x02: /* sigle word dma mode*/
1662 put_le16(identify_data
+ 62, 0x07 | (1 << (val
+ 8)));
1663 put_le16(identify_data
+ 63, 0x07);
1664 put_le16(identify_data
+ 88, 0x3f);
1666 case 0x04: /* mdma mode */
1667 put_le16(identify_data
+ 62, 0x07);
1668 put_le16(identify_data
+ 63, 0x07 | (1 << (val
+ 8)));
1669 put_le16(identify_data
+ 88, 0x3f);
1671 case 0x08: /* udma mode */
1672 put_le16(identify_data
+ 62, 0x07);
1673 put_le16(identify_data
+ 63, 0x07);
1674 put_le16(identify_data
+ 88, 0x3f | (1 << (val
+ 8)));
1684 ide_abort_command(s
);
1689 /*** ATAPI commands ***/
1691 static bool cmd_identify_packet(IDEState
*s
, uint8_t cmd
)
1693 ide_atapi_identify(s
);
1694 s
->status
= READY_STAT
| SEEK_STAT
;
1695 ide_transfer_start(s
, s
->io_buffer
, 512, ide_transfer_stop
);
1696 ide_set_irq(s
->bus
);
1700 static bool cmd_exec_dev_diagnostic(IDEState
*s
, uint8_t cmd
)
1702 ide_set_signature(s
);
1704 if (s
->drive_kind
== IDE_CD
) {
1705 s
->status
= 0; /* ATAPI spec (v6) section 9.10 defines packet
1706 * devices to return a clear status register
1707 * with READY_STAT *not* set. */
1710 s
->status
= READY_STAT
| SEEK_STAT
;
1711 /* The bits of the error register are not as usual for this command!
1712 * They are part of the regular output (this is why ERR_STAT isn't set)
1713 * Device 0 passed, Device 1 passed or not present. */
1715 ide_set_irq(s
->bus
);
1721 static bool cmd_packet(IDEState
*s
, uint8_t cmd
)
1723 /* overlapping commands not supported */
1724 if (s
->feature
& 0x02) {
1725 ide_abort_command(s
);
1729 s
->status
= READY_STAT
| SEEK_STAT
;
1730 s
->atapi_dma
= s
->feature
& 1;
1732 s
->dma_cmd
= IDE_DMA_ATAPI
;
1735 ide_transfer_start(s
, s
->io_buffer
, ATAPI_PACKET_SIZE
,
1741 /*** CF-ATA commands ***/
1743 static bool cmd_cfa_req_ext_error_code(IDEState
*s
, uint8_t cmd
)
1745 s
->error
= 0x09; /* miscellaneous error */
1746 s
->status
= READY_STAT
| SEEK_STAT
;
1747 ide_set_irq(s
->bus
);
1752 static bool cmd_cfa_erase_sectors(IDEState
*s
, uint8_t cmd
)
1754 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1755 * required for Windows 8 to work with AHCI */
1757 if (cmd
== CFA_WEAR_LEVEL
) {
1761 if (cmd
== CFA_ERASE_SECTORS
) {
1762 s
->media_changed
= 1;
1768 static bool cmd_cfa_translate_sector(IDEState
*s
, uint8_t cmd
)
1770 s
->status
= READY_STAT
| SEEK_STAT
;
1772 memset(s
->io_buffer
, 0, 0x200);
1773 s
->io_buffer
[0x00] = s
->hcyl
; /* Cyl MSB */
1774 s
->io_buffer
[0x01] = s
->lcyl
; /* Cyl LSB */
1775 s
->io_buffer
[0x02] = s
->select
; /* Head */
1776 s
->io_buffer
[0x03] = s
->sector
; /* Sector */
1777 s
->io_buffer
[0x04] = ide_get_sector(s
) >> 16; /* LBA MSB */
1778 s
->io_buffer
[0x05] = ide_get_sector(s
) >> 8; /* LBA */
1779 s
->io_buffer
[0x06] = ide_get_sector(s
) >> 0; /* LBA LSB */
1780 s
->io_buffer
[0x13] = 0x00; /* Erase flag */
1781 s
->io_buffer
[0x18] = 0x00; /* Hot count */
1782 s
->io_buffer
[0x19] = 0x00; /* Hot count */
1783 s
->io_buffer
[0x1a] = 0x01; /* Hot count */
1785 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1786 ide_set_irq(s
->bus
);
1791 static bool cmd_cfa_access_metadata_storage(IDEState
*s
, uint8_t cmd
)
1793 switch (s
->feature
) {
1794 case 0x02: /* Inquiry Metadata Storage */
1795 ide_cfata_metadata_inquiry(s
);
1797 case 0x03: /* Read Metadata Storage */
1798 ide_cfata_metadata_read(s
);
1800 case 0x04: /* Write Metadata Storage */
1801 ide_cfata_metadata_write(s
);
1804 ide_abort_command(s
);
1808 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1809 s
->status
= 0x00; /* NOTE: READY is _not_ set */
1810 ide_set_irq(s
->bus
);
1815 static bool cmd_ibm_sense_condition(IDEState
*s
, uint8_t cmd
)
1817 switch (s
->feature
) {
1818 case 0x01: /* sense temperature in device */
1819 s
->nsector
= 0x50; /* +20 C */
1822 ide_abort_command(s
);
1830 /*** SMART commands ***/
1832 static bool cmd_smart(IDEState
*s
, uint8_t cmd
)
1836 if (s
->hcyl
!= 0xc2 || s
->lcyl
!= 0x4f) {
1840 if (!s
->smart_enabled
&& s
->feature
!= SMART_ENABLE
) {
1844 switch (s
->feature
) {
1846 s
->smart_enabled
= 0;
1850 s
->smart_enabled
= 1;
1853 case SMART_ATTR_AUTOSAVE
:
1854 switch (s
->sector
) {
1856 s
->smart_autosave
= 0;
1859 s
->smart_autosave
= 1;
1867 if (!s
->smart_errors
) {
1876 case SMART_READ_THRESH
:
1877 memset(s
->io_buffer
, 0, 0x200);
1878 s
->io_buffer
[0] = 0x01; /* smart struct version */
1880 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1881 s
->io_buffer
[2 + 0 + (n
* 12)] = smart_attributes
[n
][0];
1882 s
->io_buffer
[2 + 1 + (n
* 12)] = smart_attributes
[n
][11];
1886 for (n
= 0; n
< 511; n
++) {
1887 s
->io_buffer
[511] += s
->io_buffer
[n
];
1889 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1891 s
->status
= READY_STAT
| SEEK_STAT
;
1892 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1893 ide_set_irq(s
->bus
);
1896 case SMART_READ_DATA
:
1897 memset(s
->io_buffer
, 0, 0x200);
1898 s
->io_buffer
[0] = 0x01; /* smart struct version */
1900 for (n
= 0; n
< ARRAY_SIZE(smart_attributes
); n
++) {
1902 for (i
= 0; i
< 11; i
++) {
1903 s
->io_buffer
[2 + i
+ (n
* 12)] = smart_attributes
[n
][i
];
1907 s
->io_buffer
[362] = 0x02 | (s
->smart_autosave
? 0x80 : 0x00);
1908 if (s
->smart_selftest_count
== 0) {
1909 s
->io_buffer
[363] = 0;
1912 s
->smart_selftest_data
[3 +
1913 (s
->smart_selftest_count
- 1) *
1916 s
->io_buffer
[364] = 0x20;
1917 s
->io_buffer
[365] = 0x01;
1918 /* offline data collection capacity: execute + self-test*/
1919 s
->io_buffer
[367] = (1 << 4 | 1 << 3 | 1);
1920 s
->io_buffer
[368] = 0x03; /* smart capability (1) */
1921 s
->io_buffer
[369] = 0x00; /* smart capability (2) */
1922 s
->io_buffer
[370] = 0x01; /* error logging supported */
1923 s
->io_buffer
[372] = 0x02; /* minutes for poll short test */
1924 s
->io_buffer
[373] = 0x36; /* minutes for poll ext test */
1925 s
->io_buffer
[374] = 0x01; /* minutes for poll conveyance */
1927 for (n
= 0; n
< 511; n
++) {
1928 s
->io_buffer
[511] += s
->io_buffer
[n
];
1930 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1932 s
->status
= READY_STAT
| SEEK_STAT
;
1933 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1934 ide_set_irq(s
->bus
);
1937 case SMART_READ_LOG
:
1938 switch (s
->sector
) {
1939 case 0x01: /* summary smart error log */
1940 memset(s
->io_buffer
, 0, 0x200);
1941 s
->io_buffer
[0] = 0x01;
1942 s
->io_buffer
[1] = 0x00; /* no error entries */
1943 s
->io_buffer
[452] = s
->smart_errors
& 0xff;
1944 s
->io_buffer
[453] = (s
->smart_errors
& 0xff00) >> 8;
1946 for (n
= 0; n
< 511; n
++) {
1947 s
->io_buffer
[511] += s
->io_buffer
[n
];
1949 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1951 case 0x06: /* smart self test log */
1952 memset(s
->io_buffer
, 0, 0x200);
1953 s
->io_buffer
[0] = 0x01;
1954 if (s
->smart_selftest_count
== 0) {
1955 s
->io_buffer
[508] = 0;
1957 s
->io_buffer
[508] = s
->smart_selftest_count
;
1958 for (n
= 2; n
< 506; n
++) {
1959 s
->io_buffer
[n
] = s
->smart_selftest_data
[n
];
1963 for (n
= 0; n
< 511; n
++) {
1964 s
->io_buffer
[511] += s
->io_buffer
[n
];
1966 s
->io_buffer
[511] = 0x100 - s
->io_buffer
[511];
1971 s
->status
= READY_STAT
| SEEK_STAT
;
1972 ide_transfer_start(s
, s
->io_buffer
, 0x200, ide_transfer_stop
);
1973 ide_set_irq(s
->bus
);
1976 case SMART_EXECUTE_OFFLINE
:
1977 switch (s
->sector
) {
1978 case 0: /* off-line routine */
1979 case 1: /* short self test */
1980 case 2: /* extended self test */
1981 s
->smart_selftest_count
++;
1982 if (s
->smart_selftest_count
> 21) {
1983 s
->smart_selftest_count
= 1;
1985 n
= 2 + (s
->smart_selftest_count
- 1) * 24;
1986 s
->smart_selftest_data
[n
] = s
->sector
;
1987 s
->smart_selftest_data
[n
+ 1] = 0x00; /* OK and finished */
1988 s
->smart_selftest_data
[n
+ 2] = 0x34; /* hour count lsb */
1989 s
->smart_selftest_data
[n
+ 3] = 0x12; /* hour count msb */
1998 ide_abort_command(s
);
2002 #define HD_OK (1u << IDE_HD)
2003 #define CD_OK (1u << IDE_CD)
2004 #define CFA_OK (1u << IDE_CFATA)
2005 #define HD_CFA_OK (HD_OK | CFA_OK)
2006 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
2008 /* Set the Disk Seek Completed status bit during completion */
2009 #define SET_DSC (1u << 8)
2011 /* See ACS-2 T13/2015-D Table B.2 Command codes */
2012 static const struct {
2013 /* Returns true if the completion code should be run */
2014 bool (*handler
)(IDEState
*s
, uint8_t cmd
);
2016 } ide_cmd_table
[0x100] = {
2017 /* NOP not implemented, mandatory for CD */
2018 [CFA_REQ_EXT_ERROR_CODE
] = { cmd_cfa_req_ext_error_code
, CFA_OK
},
2019 [WIN_DSM
] = { cmd_data_set_management
, HD_CFA_OK
},
2020 [WIN_DEVICE_RESET
] = { cmd_device_reset
, CD_OK
},
2021 [WIN_RECAL
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
2022 [WIN_READ
] = { cmd_read_pio
, ALL_OK
},
2023 [WIN_READ_ONCE
] = { cmd_read_pio
, HD_CFA_OK
},
2024 [WIN_READ_EXT
] = { cmd_read_pio
, HD_CFA_OK
},
2025 [WIN_READDMA_EXT
] = { cmd_read_dma
, HD_CFA_OK
},
2026 [WIN_READ_NATIVE_MAX_EXT
] = { cmd_read_native_max
, HD_CFA_OK
| SET_DSC
},
2027 [WIN_MULTREAD_EXT
] = { cmd_read_multiple
, HD_CFA_OK
},
2028 [WIN_WRITE
] = { cmd_write_pio
, HD_CFA_OK
},
2029 [WIN_WRITE_ONCE
] = { cmd_write_pio
, HD_CFA_OK
},
2030 [WIN_WRITE_EXT
] = { cmd_write_pio
, HD_CFA_OK
},
2031 [WIN_WRITEDMA_EXT
] = { cmd_write_dma
, HD_CFA_OK
},
2032 [CFA_WRITE_SECT_WO_ERASE
] = { cmd_write_pio
, CFA_OK
},
2033 [WIN_MULTWRITE_EXT
] = { cmd_write_multiple
, HD_CFA_OK
},
2034 [WIN_WRITE_VERIFY
] = { cmd_write_pio
, HD_CFA_OK
},
2035 [WIN_VERIFY
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2036 [WIN_VERIFY_ONCE
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2037 [WIN_VERIFY_EXT
] = { cmd_verify
, HD_CFA_OK
| SET_DSC
},
2038 [WIN_SEEK
] = { cmd_seek
, HD_CFA_OK
| SET_DSC
},
2039 [CFA_TRANSLATE_SECTOR
] = { cmd_cfa_translate_sector
, CFA_OK
},
2040 [WIN_DIAGNOSE
] = { cmd_exec_dev_diagnostic
, ALL_OK
},
2041 [WIN_SPECIFY
] = { cmd_nop
, HD_CFA_OK
| SET_DSC
},
2042 [WIN_STANDBYNOW2
] = { cmd_nop
, HD_CFA_OK
},
2043 [WIN_IDLEIMMEDIATE2
] = { cmd_nop
, HD_CFA_OK
},
2044 [WIN_STANDBY2
] = { cmd_nop
, HD_CFA_OK
},
2045 [WIN_SETIDLE2
] = { cmd_nop
, HD_CFA_OK
},
2046 [WIN_CHECKPOWERMODE2
] = { cmd_check_power_mode
, HD_CFA_OK
| SET_DSC
},
2047 [WIN_SLEEPNOW2
] = { cmd_nop
, HD_CFA_OK
},
2048 [WIN_PACKETCMD
] = { cmd_packet
, CD_OK
},
2049 [WIN_PIDENTIFY
] = { cmd_identify_packet
, CD_OK
},
2050 [WIN_SMART
] = { cmd_smart
, HD_CFA_OK
| SET_DSC
},
2051 [CFA_ACCESS_METADATA_STORAGE
] = { cmd_cfa_access_metadata_storage
, CFA_OK
},
2052 [CFA_ERASE_SECTORS
] = { cmd_cfa_erase_sectors
, CFA_OK
| SET_DSC
},
2053 [WIN_MULTREAD
] = { cmd_read_multiple
, HD_CFA_OK
},
2054 [WIN_MULTWRITE
] = { cmd_write_multiple
, HD_CFA_OK
},
2055 [WIN_SETMULT
] = { cmd_set_multiple_mode
, HD_CFA_OK
| SET_DSC
},
2056 [WIN_READDMA
] = { cmd_read_dma
, HD_CFA_OK
},
2057 [WIN_READDMA_ONCE
] = { cmd_read_dma
, HD_CFA_OK
},
2058 [WIN_WRITEDMA
] = { cmd_write_dma
, HD_CFA_OK
},
2059 [WIN_WRITEDMA_ONCE
] = { cmd_write_dma
, HD_CFA_OK
},
2060 [CFA_WRITE_MULTI_WO_ERASE
] = { cmd_write_multiple
, CFA_OK
},
2061 [WIN_STANDBYNOW1
] = { cmd_nop
, HD_CFA_OK
},
2062 [WIN_IDLEIMMEDIATE
] = { cmd_nop
, HD_CFA_OK
},
2063 [WIN_STANDBY
] = { cmd_nop
, HD_CFA_OK
},
2064 [WIN_SETIDLE1
] = { cmd_nop
, HD_CFA_OK
},
2065 [WIN_CHECKPOWERMODE1
] = { cmd_check_power_mode
, HD_CFA_OK
| SET_DSC
},
2066 [WIN_SLEEPNOW1
] = { cmd_nop
, HD_CFA_OK
},
2067 [WIN_FLUSH_CACHE
] = { cmd_flush_cache
, ALL_OK
},
2068 [WIN_FLUSH_CACHE_EXT
] = { cmd_flush_cache
, HD_CFA_OK
},
2069 [WIN_IDENTIFY
] = { cmd_identify
, ALL_OK
},
2070 [WIN_SETFEATURES
] = { cmd_set_features
, ALL_OK
| SET_DSC
},
2071 [IBM_SENSE_CONDITION
] = { cmd_ibm_sense_condition
, CFA_OK
| SET_DSC
},
2072 [CFA_WEAR_LEVEL
] = { cmd_cfa_erase_sectors
, HD_CFA_OK
| SET_DSC
},
2073 [WIN_READ_NATIVE_MAX
] = { cmd_read_native_max
, HD_CFA_OK
| SET_DSC
},
2076 static bool ide_cmd_permitted(IDEState
*s
, uint32_t cmd
)
2078 return cmd
< ARRAY_SIZE(ide_cmd_table
)
2079 && (ide_cmd_table
[cmd
].flags
& (1u << s
->drive_kind
));
2082 void ide_exec_cmd(IDEBus
*bus
, uint32_t val
)
2087 s
= idebus_active_if(bus
);
2088 trace_ide_exec_cmd(bus
, s
, val
);
2090 /* ignore commands to non existent slave */
2091 if (s
!= bus
->ifs
&& !s
->blk
) {
2095 /* Only RESET is allowed while BSY and/or DRQ are set,
2096 * and only to ATAPI devices. */
2097 if (s
->status
& (BUSY_STAT
|DRQ_STAT
)) {
2098 if (val
!= WIN_DEVICE_RESET
|| s
->drive_kind
!= IDE_CD
) {
2103 if (!ide_cmd_permitted(s
, val
)) {
2104 ide_abort_command(s
);
2105 ide_set_irq(s
->bus
);
2109 s
->status
= READY_STAT
| BUSY_STAT
;
2111 s
->io_buffer_offset
= 0;
2113 complete
= ide_cmd_table
[val
].handler(s
, val
);
2115 s
->status
&= ~BUSY_STAT
;
2116 assert(!!s
->error
== !!(s
->status
& ERR_STAT
));
2118 if ((ide_cmd_table
[val
].flags
& SET_DSC
) && !s
->error
) {
2119 s
->status
|= SEEK_STAT
;
2123 ide_set_irq(s
->bus
);
/* IOport [R]ead [R]egisters */
enum ATA_IOPORT_RR {
    ATA_IOPORT_RR_DATA = 0,
    ATA_IOPORT_RR_ERROR = 1,
    ATA_IOPORT_RR_SECTOR_COUNT = 2,
    ATA_IOPORT_RR_SECTOR_NUMBER = 3,
    ATA_IOPORT_RR_CYLINDER_LOW = 4,
    ATA_IOPORT_RR_CYLINDER_HIGH = 5,
    ATA_IOPORT_RR_DEVICE_HEAD = 6,
    ATA_IOPORT_RR_STATUS = 7,
    ATA_IOPORT_RR_NUM_REGISTERS,
};

/* Human-readable register names, used by the trace points below. */
const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
    [ATA_IOPORT_RR_DATA] = "Data",
    [ATA_IOPORT_RR_ERROR] = "Error",
    [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_RR_STATUS] = "Status"
};
2151 uint32_t ide_ioport_read(void *opaque
, uint32_t addr
)
2153 IDEBus
*bus
= opaque
;
2154 IDEState
*s
= idebus_active_if(bus
);
2159 hob
= bus
->cmd
& (IDE_CTRL_HOB
);
2161 case ATA_IOPORT_RR_DATA
:
2164 case ATA_IOPORT_RR_ERROR
:
2165 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2166 (s
!= bus
->ifs
&& !s
->blk
)) {
2171 ret
= s
->hob_feature
;
2174 case ATA_IOPORT_RR_SECTOR_COUNT
:
2175 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2178 ret
= s
->nsector
& 0xff;
2180 ret
= s
->hob_nsector
;
2183 case ATA_IOPORT_RR_SECTOR_NUMBER
:
2184 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2189 ret
= s
->hob_sector
;
2192 case ATA_IOPORT_RR_CYLINDER_LOW
:
2193 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2201 case ATA_IOPORT_RR_CYLINDER_HIGH
:
2202 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2210 case ATA_IOPORT_RR_DEVICE_HEAD
:
2211 if (!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) {
2218 case ATA_IOPORT_RR_STATUS
:
2219 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2220 (s
!= bus
->ifs
&& !s
->blk
)) {
2225 qemu_irq_lower(bus
->irq
);
2229 trace_ide_ioport_read(addr
, ATA_IOPORT_RR_lookup
[reg_num
], ret
, bus
, s
);
2233 uint32_t ide_status_read(void *opaque
, uint32_t addr
)
2235 IDEBus
*bus
= opaque
;
2236 IDEState
*s
= idebus_active_if(bus
);
2239 if ((!bus
->ifs
[0].blk
&& !bus
->ifs
[1].blk
) ||
2240 (s
!= bus
->ifs
&& !s
->blk
)) {
2246 trace_ide_status_read(addr
, ret
, bus
, s
);
2250 static void ide_perform_srst(IDEState
*s
)
2252 s
->status
|= BUSY_STAT
;
2254 /* Halt PIO (Via register state); PIO BH remains scheduled. */
2255 ide_transfer_halt(s
);
2257 /* Cancel DMA -- may drain block device and invoke callbacks */
2258 ide_cancel_dma_sync(s
);
2260 /* Cancel PIO callback, reset registers/signature, etc */
2263 /* perform diagnostic */
2264 cmd_exec_dev_diagnostic(s
, WIN_DIAGNOSE
);
2267 static void ide_bus_perform_srst(void *opaque
)
2269 IDEBus
*bus
= opaque
;
2273 for (i
= 0; i
< 2; i
++) {
2275 ide_perform_srst(s
);
2278 bus
->cmd
&= ~IDE_CTRL_RESET
;
2281 void ide_ctrl_write(void *opaque
, uint32_t addr
, uint32_t val
)
2283 IDEBus
*bus
= opaque
;
2287 trace_ide_ctrl_write(addr
, val
, bus
);
2289 /* Device0 and Device1 each have their own control register,
2290 * but QEMU models it as just one register in the controller. */
2291 if (!(bus
->cmd
& IDE_CTRL_RESET
) && (val
& IDE_CTRL_RESET
)) {
2292 for (i
= 0; i
< 2; i
++) {
2294 s
->status
|= BUSY_STAT
;
2296 replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2297 ide_bus_perform_srst
, bus
);
2304 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2305 * transferred from the device to the guest), false if it's a PIO in
2307 static bool ide_is_pio_out(IDEState
*s
)
2309 if (s
->end_transfer_func
== ide_sector_write
||
2310 s
->end_transfer_func
== ide_atapi_cmd
) {
2312 } else if (s
->end_transfer_func
== ide_sector_read
||
2313 s
->end_transfer_func
== ide_transfer_stop
||
2314 s
->end_transfer_func
== ide_atapi_cmd_reply_end
||
2315 s
->end_transfer_func
== ide_dummy_transfer_stop
) {
2322 void ide_data_writew(void *opaque
, uint32_t addr
, uint32_t val
)
2324 IDEBus
*bus
= opaque
;
2325 IDEState
*s
= idebus_active_if(bus
);
2328 trace_ide_data_writew(addr
, val
, bus
, s
);
2330 /* PIO data access allowed only when DRQ bit is set. The result of a write
2331 * during PIO out is indeterminate, just ignore it. */
2332 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
2337 if (p
+ 2 > s
->data_end
) {
2341 *(uint16_t *)p
= le16_to_cpu(val
);
2344 if (p
>= s
->data_end
) {
2345 s
->status
&= ~DRQ_STAT
;
2346 s
->end_transfer_func(s
);
2350 uint32_t ide_data_readw(void *opaque
, uint32_t addr
)
2352 IDEBus
*bus
= opaque
;
2353 IDEState
*s
= idebus_active_if(bus
);
2357 /* PIO data access allowed only when DRQ bit is set. The result of a read
2358 * during PIO in is indeterminate, return 0 and don't move forward. */
2359 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2364 if (p
+ 2 > s
->data_end
) {
2368 ret
= cpu_to_le16(*(uint16_t *)p
);
2371 if (p
>= s
->data_end
) {
2372 s
->status
&= ~DRQ_STAT
;
2373 s
->end_transfer_func(s
);
2376 trace_ide_data_readw(addr
, ret
, bus
, s
);
2380 void ide_data_writel(void *opaque
, uint32_t addr
, uint32_t val
)
2382 IDEBus
*bus
= opaque
;
2383 IDEState
*s
= idebus_active_if(bus
);
2386 trace_ide_data_writel(addr
, val
, bus
, s
);
2388 /* PIO data access allowed only when DRQ bit is set. The result of a write
2389 * during PIO out is indeterminate, just ignore it. */
2390 if (!(s
->status
& DRQ_STAT
) || ide_is_pio_out(s
)) {
2395 if (p
+ 4 > s
->data_end
) {
2399 *(uint32_t *)p
= le32_to_cpu(val
);
2402 if (p
>= s
->data_end
) {
2403 s
->status
&= ~DRQ_STAT
;
2404 s
->end_transfer_func(s
);
2408 uint32_t ide_data_readl(void *opaque
, uint32_t addr
)
2410 IDEBus
*bus
= opaque
;
2411 IDEState
*s
= idebus_active_if(bus
);
2415 /* PIO data access allowed only when DRQ bit is set. The result of a read
2416 * during PIO in is indeterminate, return 0 and don't move forward. */
2417 if (!(s
->status
& DRQ_STAT
) || !ide_is_pio_out(s
)) {
2423 if (p
+ 4 > s
->data_end
) {
2427 ret
= cpu_to_le32(*(uint32_t *)p
);
2430 if (p
>= s
->data_end
) {
2431 s
->status
&= ~DRQ_STAT
;
2432 s
->end_transfer_func(s
);
2436 trace_ide_data_readl(addr
, ret
, bus
, s
);
2440 static void ide_dummy_transfer_stop(IDEState
*s
)
2442 s
->data_ptr
= s
->io_buffer
;
2443 s
->data_end
= s
->io_buffer
;
2444 s
->io_buffer
[0] = 0xff;
2445 s
->io_buffer
[1] = 0xff;
2446 s
->io_buffer
[2] = 0xff;
2447 s
->io_buffer
[3] = 0xff;
2450 void ide_bus_reset(IDEBus
*bus
)
2454 ide_reset(&bus
->ifs
[0]);
2455 ide_reset(&bus
->ifs
[1]);
2458 /* pending async DMA */
2459 if (bus
->dma
->aiocb
) {
2460 trace_ide_bus_reset_aio();
2461 blk_aio_cancel(bus
->dma
->aiocb
);
2462 bus
->dma
->aiocb
= NULL
;
2465 /* reset dma provider too */
2466 if (bus
->dma
->ops
->reset
) {
2467 bus
->dma
->ops
->reset(bus
->dma
);
2471 static bool ide_cd_is_tray_open(void *opaque
)
2473 return ((IDEState
*)opaque
)->tray_open
;
2476 static bool ide_cd_is_medium_locked(void *opaque
)
2478 return ((IDEState
*)opaque
)->tray_locked
;
2481 static void ide_resize_cb(void *opaque
)
2483 IDEState
*s
= opaque
;
2484 uint64_t nb_sectors
;
2486 if (!s
->identify_set
) {
2490 blk_get_geometry(s
->blk
, &nb_sectors
);
2491 s
->nb_sectors
= nb_sectors
;
2493 /* Update the identify data buffer. */
2494 if (s
->drive_kind
== IDE_CFATA
) {
2495 ide_cfata_identify_size(s
);
2497 /* IDE_CD uses a different set of callbacks entirely. */
2498 assert(s
->drive_kind
!= IDE_CD
);
2499 ide_identify_size(s
);
2503 static const BlockDevOps ide_cd_block_ops
= {
2504 .change_media_cb
= ide_cd_change_cb
,
2505 .eject_request_cb
= ide_cd_eject_request_cb
,
2506 .is_tray_open
= ide_cd_is_tray_open
,
2507 .is_medium_locked
= ide_cd_is_medium_locked
,
2510 static const BlockDevOps ide_hd_block_ops
= {
2511 .resize_cb
= ide_resize_cb
,
2514 int ide_init_drive(IDEState
*s
, BlockBackend
*blk
, IDEDriveKind kind
,
2515 const char *version
, const char *serial
, const char *model
,
2517 uint32_t cylinders
, uint32_t heads
, uint32_t secs
,
2518 int chs_trans
, Error
**errp
)
2520 uint64_t nb_sectors
;
2523 s
->drive_kind
= kind
;
2525 blk_get_geometry(blk
, &nb_sectors
);
2526 s
->cylinders
= cylinders
;
2529 s
->chs_trans
= chs_trans
;
2530 s
->nb_sectors
= nb_sectors
;
2532 /* The SMART values should be preserved across power cycles
2534 s
->smart_enabled
= 1;
2535 s
->smart_autosave
= 1;
2536 s
->smart_errors
= 0;
2537 s
->smart_selftest_count
= 0;
2538 if (kind
== IDE_CD
) {
2539 blk_set_dev_ops(blk
, &ide_cd_block_ops
, s
);
2540 blk_set_guest_block_size(blk
, 2048);
2542 if (!blk_is_inserted(s
->blk
)) {
2543 error_setg(errp
, "Device needs media, but drive is empty");
2546 if (!blk_is_writable(blk
)) {
2547 error_setg(errp
, "Can't use a read-only drive");
2550 blk_set_dev_ops(blk
, &ide_hd_block_ops
, s
);
2553 pstrcpy(s
->drive_serial_str
, sizeof(s
->drive_serial_str
), serial
);
2555 snprintf(s
->drive_serial_str
, sizeof(s
->drive_serial_str
),
2556 "QM%05d", s
->drive_serial
);
2559 pstrcpy(s
->drive_model_str
, sizeof(s
->drive_model_str
), model
);
2563 strcpy(s
->drive_model_str
, "QEMU DVD-ROM");
2566 strcpy(s
->drive_model_str
, "QEMU MICRODRIVE");
2569 strcpy(s
->drive_model_str
, "QEMU HARDDISK");
2575 pstrcpy(s
->version
, sizeof(s
->version
), version
);
2577 pstrcpy(s
->version
, sizeof(s
->version
), qemu_hw_version());
2581 blk_iostatus_enable(blk
);
2585 static void ide_init1(IDEBus
*bus
, int unit
)
2587 static int drive_serial
= 1;
2588 IDEState
*s
= &bus
->ifs
[unit
];
2592 s
->drive_serial
= drive_serial
++;
2593 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2594 s
->io_buffer_total_len
= IDE_DMA_BUF_SECTORS
*512 + 4;
2595 s
->io_buffer
= qemu_memalign(2048, s
->io_buffer_total_len
);
2596 memset(s
->io_buffer
, 0, s
->io_buffer_total_len
);
2598 s
->smart_selftest_data
= blk_blockalign(s
->blk
, 512);
2599 memset(s
->smart_selftest_data
, 0, 512);
2601 s
->sector_write_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
2602 ide_sector_write_timer_cb
, s
);
2605 static int ide_nop_int(const IDEDMA
*dma
, bool is_write
)
2610 static void ide_nop(const IDEDMA
*dma
)
2614 static int32_t ide_nop_int32(const IDEDMA
*dma
, int32_t l
)
2619 static const IDEDMAOps ide_dma_nop_ops
= {
2620 .prepare_buf
= ide_nop_int32
,
2621 .restart_dma
= ide_nop
,
2622 .rw_buf
= ide_nop_int
,
2625 static void ide_restart_dma(IDEState
*s
, enum ide_dma_cmd dma_cmd
)
2627 s
->unit
= s
->bus
->retry_unit
;
2628 ide_set_sector(s
, s
->bus
->retry_sector_num
);
2629 s
->nsector
= s
->bus
->retry_nsector
;
2630 s
->bus
->dma
->ops
->restart_dma(s
->bus
->dma
);
2631 s
->io_buffer_size
= 0;
2632 s
->dma_cmd
= dma_cmd
;
2633 ide_start_dma(s
, ide_dma_cb
);
2636 static void ide_restart_bh(void *opaque
)
2638 IDEBus
*bus
= opaque
;
2643 qemu_bh_delete(bus
->bh
);
2646 error_status
= bus
->error_status
;
2647 if (bus
->error_status
== 0) {
2651 s
= idebus_active_if(bus
);
2652 is_read
= (bus
->error_status
& IDE_RETRY_READ
) != 0;
2654 /* The error status must be cleared before resubmitting the request: The
2655 * request may fail again, and this case can only be distinguished if the
2656 * called function can set a new error status. */
2657 bus
->error_status
= 0;
2659 /* The HBA has generically asked to be kicked on retry */
2660 if (error_status
& IDE_RETRY_HBA
) {
2661 if (s
->bus
->dma
->ops
->restart
) {
2662 s
->bus
->dma
->ops
->restart(s
->bus
->dma
);
2664 } else if (IS_IDE_RETRY_DMA(error_status
)) {
2665 if (error_status
& IDE_RETRY_TRIM
) {
2666 ide_restart_dma(s
, IDE_DMA_TRIM
);
2668 ide_restart_dma(s
, is_read
? IDE_DMA_READ
: IDE_DMA_WRITE
);
2670 } else if (IS_IDE_RETRY_PIO(error_status
)) {
2674 ide_sector_write(s
);
2676 } else if (error_status
& IDE_RETRY_FLUSH
) {
2678 } else if (IS_IDE_RETRY_ATAPI(error_status
)) {
2679 assert(s
->end_transfer_func
== ide_atapi_cmd
);
2680 ide_atapi_dma_restart(s
);
2686 static void ide_restart_cb(void *opaque
, bool running
, RunState state
)
2688 IDEBus
*bus
= opaque
;
2694 bus
->bh
= qemu_bh_new(ide_restart_bh
, bus
);
2695 qemu_bh_schedule(bus
->bh
);
2699 void ide_register_restart_cb(IDEBus
*bus
)
2701 if (bus
->dma
->ops
->restart_dma
) {
2702 bus
->vmstate
= qemu_add_vm_change_state_handler(ide_restart_cb
, bus
);
2706 static IDEDMA ide_dma_nop
= {
2707 .ops
= &ide_dma_nop_ops
,
2711 void ide_init2(IDEBus
*bus
, qemu_irq irq
)
2715 for(i
= 0; i
< 2; i
++) {
2717 ide_reset(&bus
->ifs
[i
]);
2720 bus
->dma
= &ide_dma_nop
;
2723 void ide_exit(IDEState
*s
)
2725 timer_free(s
->sector_write_timer
);
2726 qemu_vfree(s
->smart_selftest_data
);
2727 qemu_vfree(s
->io_buffer
);
2730 static bool is_identify_set(void *opaque
, int version_id
)
2732 IDEState
*s
= opaque
;
2734 return s
->identify_set
!= 0;
2737 static EndTransferFunc
* transfer_end_table
[] = {
2741 ide_atapi_cmd_reply_end
,
2743 ide_dummy_transfer_stop
,
2746 static int transfer_end_table_idx(EndTransferFunc
*fn
)
2750 for (i
= 0; i
< ARRAY_SIZE(transfer_end_table
); i
++)
2751 if (transfer_end_table
[i
] == fn
)
2757 static int ide_drive_post_load(void *opaque
, int version_id
)
2759 IDEState
*s
= opaque
;
2761 if (s
->blk
&& s
->identify_set
) {
2762 blk_set_enable_write_cache(s
->blk
, !!(s
->identify_data
[85] & (1 << 5)));
2767 static int ide_drive_pio_post_load(void *opaque
, int version_id
)
2769 IDEState
*s
= opaque
;
2771 if (s
->end_transfer_fn_idx
>= ARRAY_SIZE(transfer_end_table
)) {
2774 s
->end_transfer_func
= transfer_end_table
[s
->end_transfer_fn_idx
];
2775 s
->data_ptr
= s
->io_buffer
+ s
->cur_io_buffer_offset
;
2776 s
->data_end
= s
->data_ptr
+ s
->cur_io_buffer_len
;
2777 s
->atapi_dma
= s
->feature
& 1; /* as per cmd_packet */
2782 static int ide_drive_pio_pre_save(void *opaque
)
2784 IDEState
*s
= opaque
;
2787 s
->cur_io_buffer_offset
= s
->data_ptr
- s
->io_buffer
;
2788 s
->cur_io_buffer_len
= s
->data_end
- s
->data_ptr
;
2790 idx
= transfer_end_table_idx(s
->end_transfer_func
);
2792 fprintf(stderr
, "%s: invalid end_transfer_func for DRQ_STAT\n",
2794 s
->end_transfer_fn_idx
= 2;
2796 s
->end_transfer_fn_idx
= idx
;
2802 static bool ide_drive_pio_state_needed(void *opaque
)
2804 IDEState
*s
= opaque
;
2806 return ((s
->status
& DRQ_STAT
) != 0)
2807 || (s
->bus
->error_status
& IDE_RETRY_PIO
);
2810 static bool ide_tray_state_needed(void *opaque
)
2812 IDEState
*s
= opaque
;
2814 return s
->tray_open
|| s
->tray_locked
;
2817 static bool ide_atapi_gesn_needed(void *opaque
)
2819 IDEState
*s
= opaque
;
2821 return s
->events
.new_media
|| s
->events
.eject_request
;
2824 static bool ide_error_needed(void *opaque
)
2826 IDEBus
*bus
= opaque
;
2828 return (bus
->error_status
!= 0);
2831 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2832 static const VMStateDescription vmstate_ide_atapi_gesn_state
= {
2833 .name
="ide_drive/atapi/gesn_state",
2835 .minimum_version_id
= 1,
2836 .needed
= ide_atapi_gesn_needed
,
2837 .fields
= (VMStateField
[]) {
2838 VMSTATE_BOOL(events
.new_media
, IDEState
),
2839 VMSTATE_BOOL(events
.eject_request
, IDEState
),
2840 VMSTATE_END_OF_LIST()
2844 static const VMStateDescription vmstate_ide_tray_state
= {
2845 .name
= "ide_drive/tray_state",
2847 .minimum_version_id
= 1,
2848 .needed
= ide_tray_state_needed
,
2849 .fields
= (VMStateField
[]) {
2850 VMSTATE_BOOL(tray_open
, IDEState
),
2851 VMSTATE_BOOL(tray_locked
, IDEState
),
2852 VMSTATE_END_OF_LIST()
2856 static const VMStateDescription vmstate_ide_drive_pio_state
= {
2857 .name
= "ide_drive/pio_state",
2859 .minimum_version_id
= 1,
2860 .pre_save
= ide_drive_pio_pre_save
,
2861 .post_load
= ide_drive_pio_post_load
,
2862 .needed
= ide_drive_pio_state_needed
,
2863 .fields
= (VMStateField
[]) {
2864 VMSTATE_INT32(req_nb_sectors
, IDEState
),
2865 VMSTATE_VARRAY_INT32(io_buffer
, IDEState
, io_buffer_total_len
, 1,
2866 vmstate_info_uint8
, uint8_t),
2867 VMSTATE_INT32(cur_io_buffer_offset
, IDEState
),
2868 VMSTATE_INT32(cur_io_buffer_len
, IDEState
),
2869 VMSTATE_UINT8(end_transfer_fn_idx
, IDEState
),
2870 VMSTATE_INT32(elementary_transfer_size
, IDEState
),
2871 VMSTATE_INT32(packet_transfer_size
, IDEState
),
2872 VMSTATE_END_OF_LIST()
2876 const VMStateDescription vmstate_ide_drive
= {
2877 .name
= "ide_drive",
2879 .minimum_version_id
= 0,
2880 .post_load
= ide_drive_post_load
,
2881 .fields
= (VMStateField
[]) {
2882 VMSTATE_INT32(mult_sectors
, IDEState
),
2883 VMSTATE_INT32(identify_set
, IDEState
),
2884 VMSTATE_BUFFER_TEST(identify_data
, IDEState
, is_identify_set
),
2885 VMSTATE_UINT8(feature
, IDEState
),
2886 VMSTATE_UINT8(error
, IDEState
),
2887 VMSTATE_UINT32(nsector
, IDEState
),
2888 VMSTATE_UINT8(sector
, IDEState
),
2889 VMSTATE_UINT8(lcyl
, IDEState
),
2890 VMSTATE_UINT8(hcyl
, IDEState
),
2891 VMSTATE_UINT8(hob_feature
, IDEState
),
2892 VMSTATE_UINT8(hob_sector
, IDEState
),
2893 VMSTATE_UINT8(hob_nsector
, IDEState
),
2894 VMSTATE_UINT8(hob_lcyl
, IDEState
),
2895 VMSTATE_UINT8(hob_hcyl
, IDEState
),
2896 VMSTATE_UINT8(select
, IDEState
),
2897 VMSTATE_UINT8(status
, IDEState
),
2898 VMSTATE_UINT8(lba48
, IDEState
),
2899 VMSTATE_UINT8(sense_key
, IDEState
),
2900 VMSTATE_UINT8(asc
, IDEState
),
2901 VMSTATE_UINT8_V(cdrom_changed
, IDEState
, 3),
2902 VMSTATE_END_OF_LIST()
2904 .subsections
= (const VMStateDescription
*[]) {
2905 &vmstate_ide_drive_pio_state
,
2906 &vmstate_ide_tray_state
,
2907 &vmstate_ide_atapi_gesn_state
,
2912 static const VMStateDescription vmstate_ide_error_status
= {
2913 .name
="ide_bus/error",
2915 .minimum_version_id
= 1,
2916 .needed
= ide_error_needed
,
2917 .fields
= (VMStateField
[]) {
2918 VMSTATE_INT32(error_status
, IDEBus
),
2919 VMSTATE_INT64_V(retry_sector_num
, IDEBus
, 2),
2920 VMSTATE_UINT32_V(retry_nsector
, IDEBus
, 2),
2921 VMSTATE_UINT8_V(retry_unit
, IDEBus
, 2),
2922 VMSTATE_END_OF_LIST()
2926 const VMStateDescription vmstate_ide_bus
= {
2929 .minimum_version_id
= 1,
2930 .fields
= (VMStateField
[]) {
2931 VMSTATE_UINT8(cmd
, IDEBus
),
2932 VMSTATE_UINT8(unit
, IDEBus
),
2933 VMSTATE_END_OF_LIST()
2935 .subsections
= (const VMStateDescription
*[]) {
2936 &vmstate_ide_error_status
,
2941 void ide_drive_get(DriveInfo
**hd
, int n
)
2945 for (i
= 0; i
< n
; i
++) {
2946 hd
[i
] = drive_get_by_index(IF_IDE
, i
);