1 /*
2 * QEMU IDE disk and CD/DVD-ROM Emulator
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25 #include <hw/hw.h>
26 #include <hw/i386/pc.h>
27 #include <hw/pci/pci.h>
28 #include <hw/isa/isa.h>
29 #include "qemu/error-report.h"
30 #include "qemu/timer.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/dma.h"
33 #include "hw/block/block.h"
34 #include "sysemu/block-backend.h"
35
36 #include <hw/ide/internal.h>
37
38 /* These values were based on a Seagate ST3500418AS but have been modified
39 to make more sense in QEMU */
40 static const int smart_attributes[][12] = {
41 /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */
42 /* raw read error rate */
43 { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
44 /* spin up */
45 { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
46 /* start stop count */
47 { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
48 /* remapped sectors */
49 { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
50 /* power on hours */
51 { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
52 /* power cycle count */
53 { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
54 /* airflow-temperature-celsius */
55 { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
56 };
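/*
 * Each row above is one 12-byte SMART attribute: id, two flag bytes,
 * current (normalized) value, worst value, six raw-value bytes and,
 * kept in the same row for convenience, the threshold reported by
 * SMART READ THRESH (SMART READ DATA only returns the first 11 bytes).
 * E.g. the last row is attribute 190 (airflow temperature): value and
 * worst 0x45, first raw byte 0x1f (presumably 31 C), threshold 0x32.
 */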
57
58 static int ide_handle_rw_error(IDEState *s, int error, int op);
59 static void ide_dummy_transfer_stop(IDEState *s);
60
61 static void padstr(char *str, const char *src, int len)
62 {
63 int i, v;
64 for(i = 0; i < len; i++) {
65 if (*src)
66 v = *src++;
67 else
68 v = ' ';
69 str[i^1] = v;
70 }
71 }
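/*
 * Note the "i ^ 1" above: ATA IDENTIFY string fields pack two ASCII
 * characters per 16-bit word with the first character in the high
 * byte. Writing str[i ^ 1] swaps each byte pair so the buffer ends up
 * in that layout when read as little-endian words. For example,
 * padstr(buf, "QEMU", 4) stores the bytes 'E','Q','U','M'.
 */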
72
73 static void put_le16(uint16_t *p, unsigned int v)
74 {
75 *p = cpu_to_le16(v);
76 }
77
78 static void ide_identify_size(IDEState *s)
79 {
80 uint16_t *p = (uint16_t *)s->identify_data;
81 put_le16(p + 60, s->nb_sectors);
82 put_le16(p + 61, s->nb_sectors >> 16);
83 put_le16(p + 100, s->nb_sectors);
84 put_le16(p + 101, s->nb_sectors >> 16);
85 put_le16(p + 102, s->nb_sectors >> 32);
86 put_le16(p + 103, s->nb_sectors >> 48);
87 }
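/*
 * Words 60-61 of the IDENTIFY data hold the 28-bit LBA sector count
 * (low word first) and words 100-103 the 48-bit count. For example, a
 * 10 GiB disk has nb_sectors = 20971520 = 0x01400000, so word 60
 * becomes 0x0000 and word 61 becomes 0x0140.
 */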
88
89 static void ide_identify(IDEState *s)
90 {
91 uint16_t *p;
92 unsigned int oldsize;
93 IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;
94
95 p = (uint16_t *)s->identify_data;
96 if (s->identify_set) {
97 goto fill_buffer;
98 }
99 memset(p, 0, sizeof(s->identify_data));
100
101 put_le16(p + 0, 0x0040);
102 put_le16(p + 1, s->cylinders);
103 put_le16(p + 3, s->heads);
104 put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
105 put_le16(p + 5, 512); /* XXX: retired, remove ? */
106 put_le16(p + 6, s->sectors);
107 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
108 put_le16(p + 20, 3); /* XXX: retired, remove ? */
109 put_le16(p + 21, 512); /* cache size in sectors */
110 put_le16(p + 22, 4); /* ecc bytes */
111 padstr((char *)(p + 23), s->version, 8); /* firmware version */
112 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
113 #if MAX_MULT_SECTORS > 1
114 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
115 #endif
116 put_le16(p + 48, 1); /* dword I/O */
117 put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
118 put_le16(p + 51, 0x200); /* PIO transfer cycle */
119 put_le16(p + 52, 0x200); /* DMA transfer cycle */
120 put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
121 put_le16(p + 54, s->cylinders);
122 put_le16(p + 55, s->heads);
123 put_le16(p + 56, s->sectors);
124 oldsize = s->cylinders * s->heads * s->sectors;
125 put_le16(p + 57, oldsize);
126 put_le16(p + 58, oldsize >> 16);
127 if (s->mult_sectors)
128 put_le16(p + 59, 0x100 | s->mult_sectors);
129 /* *(p + 60) := nb_sectors -- see ide_identify_size */
130 /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
131 put_le16(p + 62, 0x07); /* single word dma0-2 supported */
132 put_le16(p + 63, 0x07); /* mdma0-2 supported */
133 put_le16(p + 64, 0x03); /* pio3-4 supported */
134 put_le16(p + 65, 120);
135 put_le16(p + 66, 120);
136 put_le16(p + 67, 120);
137 put_le16(p + 68, 120);
138 if (dev && dev->conf.discard_granularity) {
139 put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
140 }
141
142 if (s->ncq_queues) {
143 put_le16(p + 75, s->ncq_queues - 1);
144 /* NCQ supported */
145 put_le16(p + 76, (1 << 8));
146 }
147
148 put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
149 put_le16(p + 81, 0x16); /* conforms to ata5 */
150 /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
151 put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
152 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
153 put_le16(p + 83, (1 << 14) | (1 << 13) | (1 << 12) | (1 << 10));
154 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
155 if (s->wwn) {
156 put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
157 } else {
158 put_le16(p + 84, (1 << 14) | 0);
159 }
160 /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
161 if (blk_enable_write_cache(s->blk)) {
162 put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
163 } else {
164 put_le16(p + 85, (1 << 14) | 1);
165 }
166 /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
167 put_le16(p + 86, (1 << 13) | (1 << 12) | (1 << 10));
168 /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
169 if (s->wwn) {
170 put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
171 } else {
172 put_le16(p + 87, (1 << 14) | 0);
173 }
174 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
175 put_le16(p + 93, 1 | (1 << 14) | 0x2000);
176 /* *(p + 100) := nb_sectors -- see ide_identify_size */
177 /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
178 /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
179 /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */
180
181 if (dev && dev->conf.physical_block_size)
182 put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
183 if (s->wwn) {
184 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
185 put_le16(p + 108, s->wwn >> 48);
186 put_le16(p + 109, s->wwn >> 32);
187 put_le16(p + 110, s->wwn >> 16);
188 put_le16(p + 111, s->wwn);
189 }
190 if (dev && dev->conf.discard_granularity) {
191 put_le16(p + 169, 1); /* TRIM support */
192 }
193
194 ide_identify_size(s);
195 s->identify_set = 1;
196
197 fill_buffer:
198 memcpy(s->io_buffer, p, sizeof(s->identify_data));
199 }
200
201 static void ide_atapi_identify(IDEState *s)
202 {
203 uint16_t *p;
204
205 p = (uint16_t *)s->identify_data;
206 if (s->identify_set) {
207 goto fill_buffer;
208 }
209 memset(p, 0, sizeof(s->identify_data));
210
211 /* Removable CDROM, 50us response, 12 byte packets */
212 put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
213 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
214 put_le16(p + 20, 3); /* buffer type */
215 put_le16(p + 21, 512); /* cache size in sectors */
216 put_le16(p + 22, 4); /* ecc bytes */
217 padstr((char *)(p + 23), s->version, 8); /* firmware version */
218 padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
219 put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
220 #ifdef USE_DMA_CDROM
221 put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
222 put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
223 put_le16(p + 62, 7); /* single word dma0-2 supported */
224 put_le16(p + 63, 7); /* mdma0-2 supported */
225 #else
226 put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
227 put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
228 put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
229 #endif
230 put_le16(p + 64, 3); /* pio3-4 supported */
231 put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
232 put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
233 put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
234 put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */
235
236 put_le16(p + 71, 30); /* in ns */
237 put_le16(p + 72, 30); /* in ns */
238
239 if (s->ncq_queues) {
240 put_le16(p + 75, s->ncq_queues - 1);
241 /* NCQ supported */
242 put_le16(p + 76, (1 << 8));
243 }
244
245 put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
246 if (s->wwn) {
247 put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
248 put_le16(p + 87, (1 << 8)); /* WWN enabled */
249 }
250
251 #ifdef USE_DMA_CDROM
252 put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
253 #endif
254
255 if (s->wwn) {
256 /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
257 put_le16(p + 108, s->wwn >> 48);
258 put_le16(p + 109, s->wwn >> 32);
259 put_le16(p + 110, s->wwn >> 16);
260 put_le16(p + 111, s->wwn);
261 }
262
263 s->identify_set = 1;
264
265 fill_buffer:
266 memcpy(s->io_buffer, p, sizeof(s->identify_data));
267 }
268
269 static void ide_cfata_identify_size(IDEState *s)
270 {
271 uint16_t *p = (uint16_t *)s->identify_data;
272 put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
273 put_le16(p + 8, s->nb_sectors); /* Sectors per card */
274 put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
275 put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
276 }
277
278 static void ide_cfata_identify(IDEState *s)
279 {
280 uint16_t *p;
281 uint32_t cur_sec;
282
283 p = (uint16_t *)s->identify_data;
284 if (s->identify_set) {
285 goto fill_buffer;
286 }
287 memset(p, 0, sizeof(s->identify_data));
288
289 cur_sec = s->cylinders * s->heads * s->sectors;
290
291 put_le16(p + 0, 0x848a); /* CF Storage Card signature */
292 put_le16(p + 1, s->cylinders); /* Default cylinders */
293 put_le16(p + 3, s->heads); /* Default heads */
294 put_le16(p + 6, s->sectors); /* Default sectors per track */
295 /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
296 /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */
297 padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
298 put_le16(p + 22, 0x0004); /* ECC bytes */
299 padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
300 padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
301 #if MAX_MULT_SECTORS > 1
302 put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
303 #else
304 put_le16(p + 47, 0x0000);
305 #endif
306 put_le16(p + 49, 0x0f00); /* Capabilities */
307 put_le16(p + 51, 0x0002); /* PIO cycle timing mode */
308 put_le16(p + 52, 0x0001); /* DMA cycle timing mode */
309 put_le16(p + 53, 0x0003); /* Translation params valid */
310 put_le16(p + 54, s->cylinders); /* Current cylinders */
311 put_le16(p + 55, s->heads); /* Current heads */
312 put_le16(p + 56, s->sectors); /* Current sectors */
313 put_le16(p + 57, cur_sec); /* Current capacity */
314 put_le16(p + 58, cur_sec >> 16); /* Current capacity */
315 if (s->mult_sectors) /* Multiple sector setting */
316 put_le16(p + 59, 0x100 | s->mult_sectors);
317 /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */
318 /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
319 put_le16(p + 63, 0x0203); /* Multiword DMA capability */
320 put_le16(p + 64, 0x0001); /* Flow Control PIO support */
321 put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */
322 put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */
323 put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */
324 put_le16(p + 82, 0x400c); /* Command Set supported */
325 put_le16(p + 83, 0x7068); /* Command Set supported */
326 put_le16(p + 84, 0x4000); /* Features supported */
327 put_le16(p + 85, 0x000c); /* Command Set enabled */
328 put_le16(p + 86, 0x7044); /* Command Set enabled */
329 put_le16(p + 87, 0x4000); /* Features enabled */
330 put_le16(p + 91, 0x4060); /* Current APM level */
331 put_le16(p + 129, 0x0002); /* Current features option */
332 put_le16(p + 130, 0x0005); /* Reassigned sectors */
333 put_le16(p + 131, 0x0001); /* Initial power mode */
334 put_le16(p + 132, 0x0000); /* User signature */
335 put_le16(p + 160, 0x8100); /* Power requirement */
336 put_le16(p + 161, 0x8001); /* CF command set */
337
338 ide_cfata_identify_size(s);
339 s->identify_set = 1;
340
341 fill_buffer:
342 memcpy(s->io_buffer, p, sizeof(s->identify_data));
343 }
344
345 static void ide_set_signature(IDEState *s)
346 {
347 s->select &= 0xf0; /* clear head */
348 /* put signature */
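/*
 * The values below follow the ATA/ATAPI device signature convention:
 * 0xEB14 in the cylinder registers identifies a PACKET (ATAPI)
 * device, 0x0000 a regular ATA device, and 0xFFFF is used here when
 * no backing device is attached to the interface.
 */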
349 s->nsector = 1;
350 s->sector = 1;
351 if (s->drive_kind == IDE_CD) {
352 s->lcyl = 0x14;
353 s->hcyl = 0xeb;
354 } else if (s->blk) {
355 s->lcyl = 0;
356 s->hcyl = 0;
357 } else {
358 s->lcyl = 0xff;
359 s->hcyl = 0xff;
360 }
361 }
362
363 typedef struct TrimAIOCB {
364 BlockAIOCB common;
365 BlockBackend *blk;
366 QEMUBH *bh;
367 int ret;
368 QEMUIOVector *qiov;
369 BlockAIOCB *aiocb;
370 int i, j;
371 } TrimAIOCB;
372
373 static void trim_aio_cancel(BlockAIOCB *acb)
374 {
375 TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);
376
377 /* Exit the loop so ide_issue_trim_cb will not continue */
378 iocb->j = iocb->qiov->niov - 1;
379 iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;
380
381 iocb->ret = -ECANCELED;
382
383 if (iocb->aiocb) {
384 blk_aio_cancel_async(iocb->aiocb);
385 iocb->aiocb = NULL;
386 }
387 }
388
389 static const AIOCBInfo trim_aiocb_info = {
390 .aiocb_size = sizeof(TrimAIOCB),
391 .cancel_async = trim_aio_cancel,
392 };
393
394 static void ide_trim_bh_cb(void *opaque)
395 {
396 TrimAIOCB *iocb = opaque;
397
398 iocb->common.cb(iocb->common.opaque, iocb->ret);
399
400 qemu_bh_delete(iocb->bh);
401 iocb->bh = NULL;
402 qemu_aio_unref(iocb);
403 }
404
405 static void ide_issue_trim_cb(void *opaque, int ret)
406 {
407 TrimAIOCB *iocb = opaque;
408 if (ret >= 0) {
409 while (iocb->j < iocb->qiov->niov) {
410 int j = iocb->j;
411 while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
412 int i = iocb->i;
413 uint64_t *buffer = iocb->qiov->iov[j].iov_base;
414
415 /* 6-byte LBA + 2-byte range per entry */
416 uint64_t entry = le64_to_cpu(buffer[i]);
417 uint64_t sector = entry & 0x0000ffffffffffffULL;
418 uint16_t count = entry >> 48;
419
420 if (count == 0) {
421 continue;
422 }
423
424 /* Got an entry! Submit and exit. */
425 iocb->aiocb = blk_aio_discard(iocb->blk, sector, count,
426 ide_issue_trim_cb, opaque);
427 return;
428 }
429
430 iocb->j++;
431 iocb->i = -1;
432 }
433 } else {
434 iocb->ret = ret;
435 }
436
437 iocb->aiocb = NULL;
438 if (iocb->bh) {
439 qemu_bh_schedule(iocb->bh);
440 }
441 }
442
443 BlockAIOCB *ide_issue_trim(BlockBackend *blk,
444 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
445 BlockCompletionFunc *cb, void *opaque)
446 {
447 TrimAIOCB *iocb;
448
449 iocb = blk_aio_get(&trim_aiocb_info, blk, cb, opaque);
450 iocb->blk = blk;
451 iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
452 iocb->ret = 0;
453 iocb->qiov = qiov;
454 iocb->i = -1;
455 iocb->j = 0;
456 ide_issue_trim_cb(iocb, 0);
457 return &iocb->common;
458 }
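/*
 * The DSM TRIM payload decoded above consists of little-endian 64-bit
 * entries: the low 48 bits hold the starting LBA, the upper 16 bits
 * the number of sectors to discard, and a count of zero marks an
 * unused entry. E.g. the entry 0x0008000000001000 requests a discard
 * of 8 sectors starting at LBA 0x1000.
 */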
459
460 void ide_abort_command(IDEState *s)
461 {
462 ide_transfer_stop(s);
463 s->status = READY_STAT | ERR_STAT;
464 s->error = ABRT_ERR;
465 }
466
467 /* prepare data transfer and tell what to do after */
468 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
469 EndTransferFunc *end_transfer_func)
470 {
471 s->end_transfer_func = end_transfer_func;
472 s->data_ptr = buf;
473 s->data_end = buf + size;
474 if (!(s->status & ERR_STAT)) {
475 s->status |= DRQ_STAT;
476 }
477 if (s->bus->dma->ops->start_transfer) {
478 s->bus->dma->ops->start_transfer(s->bus->dma);
479 }
480 }
481
482 static void ide_cmd_done(IDEState *s)
483 {
484 if (s->bus->dma->ops->cmd_done) {
485 s->bus->dma->ops->cmd_done(s->bus->dma);
486 }
487 }
488
489 void ide_transfer_stop(IDEState *s)
490 {
491 s->end_transfer_func = ide_transfer_stop;
492 s->data_ptr = s->io_buffer;
493 s->data_end = s->io_buffer;
494 s->status &= ~DRQ_STAT;
495 ide_cmd_done(s);
496 }
497
498 int64_t ide_get_sector(IDEState *s)
499 {
500 int64_t sector_num;
501 if (s->select & 0x40) {
502 /* lba */
503 if (!s->lba48) {
504 sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) |
505 (s->lcyl << 8) | s->sector;
506 } else {
507 sector_num = ((int64_t)s->hob_hcyl << 40) |
508 ((int64_t) s->hob_lcyl << 32) |
509 ((int64_t) s->hob_sector << 24) |
510 ((int64_t) s->hcyl << 16) |
511 ((int64_t) s->lcyl << 8) | s->sector;
512 }
513 } else {
514 sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
515 (s->select & 0x0f) * s->sectors + (s->sector - 1);
516 }
517 return sector_num;
518 }
519
520 void ide_set_sector(IDEState *s, int64_t sector_num)
521 {
522 unsigned int cyl, r;
523 if (s->select & 0x40) {
524 if (!s->lba48) {
525 s->select = (s->select & 0xf0) | (sector_num >> 24);
526 s->hcyl = (sector_num >> 16);
527 s->lcyl = (sector_num >> 8);
528 s->sector = (sector_num);
529 } else {
530 s->sector = sector_num;
531 s->lcyl = sector_num >> 8;
532 s->hcyl = sector_num >> 16;
533 s->hob_sector = sector_num >> 24;
534 s->hob_lcyl = sector_num >> 32;
535 s->hob_hcyl = sector_num >> 40;
536 }
537 } else {
538 cyl = sector_num / (s->heads * s->sectors);
539 r = sector_num % (s->heads * s->sectors);
540 s->hcyl = cyl >> 8;
541 s->lcyl = cyl;
542 s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f);
543 s->sector = (r % s->sectors) + 1;
544 }
545 }
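/*
 * ide_get_sector()/ide_set_sector() handle the three addressing modes:
 * CHS (bit 6 of the select register clear), 28-bit LBA (bit 6 set,
 * LBA bits 27:24 in the low nibble of select) and 48-bit LBA (the
 * "hob" register copies supply bits 47:24). As a CHS example, with
 * heads=16 and sectors=63, cylinder 2 / head 3 / sector 4 maps to
 * LBA (2 * 16 + 3) * 63 + (4 - 1) = 2208.
 */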
546
547 static void ide_rw_error(IDEState *s) {
548 ide_abort_command(s);
549 ide_set_irq(s->bus);
550 }
551
552 static bool ide_sect_range_ok(IDEState *s,
553 uint64_t sector, uint64_t nb_sectors)
554 {
555 uint64_t total_sectors;
556
557 blk_get_geometry(s->blk, &total_sectors);
558 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
559 return false;
560 }
561 return true;
562 }
563
564 static void ide_buffered_readv_cb(void *opaque, int ret)
565 {
566 IDEBufferedRequest *req = opaque;
567 if (!req->orphaned) {
568 if (!ret) {
569 qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
570 req->original_qiov->size);
571 }
572 req->original_cb(req->original_opaque, ret);
573 }
574 QLIST_REMOVE(req, list);
575 qemu_vfree(req->iov.iov_base);
576 g_free(req);
577 }
578
579 #define MAX_BUFFERED_REQS 16
580
581 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
582 QEMUIOVector *iov, int nb_sectors,
583 BlockCompletionFunc *cb, void *opaque)
584 {
585 BlockAIOCB *aioreq;
586 IDEBufferedRequest *req;
587 int c = 0;
588
589 QLIST_FOREACH(req, &s->buffered_requests, list) {
590 c++;
591 }
592 if (c > MAX_BUFFERED_REQS) {
593 return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
594 }
595
596 req = g_new0(IDEBufferedRequest, 1);
597 req->original_qiov = iov;
598 req->original_cb = cb;
599 req->original_opaque = opaque;
600 req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
601 req->iov.iov_len = iov->size;
602 qemu_iovec_init_external(&req->qiov, &req->iov, 1);
603
604 aioreq = blk_aio_readv(s->blk, sector_num, &req->qiov, nb_sectors,
605 ide_buffered_readv_cb, req);
606
607 QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
608 return aioreq;
609 }
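/*
 * ide_buffered_readv() reads into a private bounce buffer instead of
 * directly into the caller's iovec; ide_buffered_readv_cb() copies the
 * data into the original qiov only if the request has not been marked
 * orphaned. This lets a caller (e.g. a reset path) detach an in-flight
 * PIO read from guest memory without waiting for the AIO to complete.
 * Once more than MAX_BUFFERED_REQS such requests are already queued,
 * further reads fail immediately with -EIO.
 */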
610
611 static void ide_sector_read(IDEState *s);
612
613 static void ide_sector_read_cb(void *opaque, int ret)
614 {
615 IDEState *s = opaque;
616 int n;
617
618 s->pio_aiocb = NULL;
619 s->status &= ~BUSY_STAT;
620
621 if (ret == -ECANCELED) {
622 return;
623 }
624 if (ret != 0) {
625 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
626 IDE_RETRY_READ)) {
627 return;
628 }
629 }
630
631 block_acct_done(blk_get_stats(s->blk), &s->acct);
632
633 n = s->nsector;
634 if (n > s->req_nb_sectors) {
635 n = s->req_nb_sectors;
636 }
637
638 ide_set_sector(s, ide_get_sector(s) + n);
639 s->nsector -= n;
640 /* Allow the guest to read the io_buffer */
641 ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
642 ide_set_irq(s->bus);
643 }
644
645 static void ide_sector_read(IDEState *s)
646 {
647 int64_t sector_num;
648 int n;
649
650 s->status = READY_STAT | SEEK_STAT;
651 s->error = 0; /* not needed by IDE spec, but needed by Windows */
652 sector_num = ide_get_sector(s);
653 n = s->nsector;
654
655 if (n == 0) {
656 ide_transfer_stop(s);
657 return;
658 }
659
660 s->status |= BUSY_STAT;
661
662 if (n > s->req_nb_sectors) {
663 n = s->req_nb_sectors;
664 }
665
666 #if defined(DEBUG_IDE)
667 printf("sector=%" PRId64 "\n", sector_num);
668 #endif
669
670 if (!ide_sect_range_ok(s, sector_num, n)) {
671 ide_rw_error(s);
672 block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
673 return;
674 }
675
676 s->iov.iov_base = s->io_buffer;
677 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
678 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
679
680 block_acct_start(blk_get_stats(s->blk), &s->acct,
681 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
682 s->pio_aiocb = blk_aio_readv(s->blk, sector_num, &s->qiov, n,
683 ide_sector_read_cb, s);
684 }
685
686 void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
687 {
688 if (s->bus->dma->ops->commit_buf) {
689 s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
690 }
691 s->io_buffer_offset += tx_bytes;
692 qemu_sglist_destroy(&s->sg);
693 }
694
695 void ide_set_inactive(IDEState *s, bool more)
696 {
697 s->bus->dma->aiocb = NULL;
698 s->bus->retry_unit = -1;
699 s->bus->retry_sector_num = 0;
700 s->bus->retry_nsector = 0;
701 if (s->bus->dma->ops->set_inactive) {
702 s->bus->dma->ops->set_inactive(s->bus->dma, more);
703 }
704 ide_cmd_done(s);
705 }
706
707 void ide_dma_error(IDEState *s)
708 {
709 dma_buf_commit(s, 0);
710 ide_abort_command(s);
711 ide_set_inactive(s, false);
712 ide_set_irq(s->bus);
713 }
714
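/*
 * Returns nonzero if the caller must bail out because the request was
 * either reported to the guest as failed or parked for a retry after
 * the VM is resumed (the "stop" error action); returns 0 only for the
 * "ignore" error policy, where emulation continues as on success.
 */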
715 static int ide_handle_rw_error(IDEState *s, int error, int op)
716 {
717 bool is_read = (op & IDE_RETRY_READ) != 0;
718 BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);
719
720 if (action == BLOCK_ERROR_ACTION_STOP) {
721 assert(s->bus->retry_unit == s->unit);
722 s->bus->error_status = op;
723 } else if (action == BLOCK_ERROR_ACTION_REPORT) {
724 block_acct_failed(blk_get_stats(s->blk), &s->acct);
725 if (op & IDE_RETRY_DMA) {
726 ide_dma_error(s);
727 } else {
728 ide_rw_error(s);
729 }
730 }
731 blk_error_action(s->blk, action, is_read, error);
732 return action != BLOCK_ERROR_ACTION_IGNORE;
733 }
734
735 static void ide_dma_cb(void *opaque, int ret)
736 {
737 IDEState *s = opaque;
738 int n;
739 int64_t sector_num;
740 bool stay_active = false;
741
742 if (ret == -ECANCELED) {
743 return;
744 }
745 if (ret < 0) {
746 int op = IDE_RETRY_DMA;
747
748 if (s->dma_cmd == IDE_DMA_READ)
749 op |= IDE_RETRY_READ;
750 else if (s->dma_cmd == IDE_DMA_TRIM)
751 op |= IDE_RETRY_TRIM;
752
753 if (ide_handle_rw_error(s, -ret, op)) {
754 return;
755 }
756 }
757
758 n = s->io_buffer_size >> 9;
759 if (n > s->nsector) {
760 /* The PRDs were longer than needed for this request. Shorten them so
761 * we don't get a negative remainder. The Active bit must remain set
762 * after the request completes. */
763 n = s->nsector;
764 stay_active = true;
765 }
766
767 sector_num = ide_get_sector(s);
768 if (n > 0) {
769 assert(n * 512 == s->sg.size);
770 dma_buf_commit(s, s->sg.size);
771 sector_num += n;
772 ide_set_sector(s, sector_num);
773 s->nsector -= n;
774 }
775
776 /* end of transfer ? */
777 if (s->nsector == 0) {
778 s->status = READY_STAT | SEEK_STAT;
779 ide_set_irq(s->bus);
780 goto eot;
781 }
782
783 /* launch next transfer */
784 n = s->nsector;
785 s->io_buffer_index = 0;
786 s->io_buffer_size = n * 512;
787 if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
788 /* The PRDs were too short. Reset the Active bit, but don't raise an
789 * interrupt. */
790 s->status = READY_STAT | SEEK_STAT;
791 dma_buf_commit(s, 0);
792 goto eot;
793 }
794
795 #ifdef DEBUG_AIO
796 printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, dma_cmd=%d\n",
797 sector_num, n, s->dma_cmd);
798 #endif
799
800 if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
801 !ide_sect_range_ok(s, sector_num, n)) {
802 ide_dma_error(s);
803 block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
804 return;
805 }
806
807 switch (s->dma_cmd) {
808 case IDE_DMA_READ:
809 s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num,
810 ide_dma_cb, s);
811 break;
812 case IDE_DMA_WRITE:
813 s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num,
814 ide_dma_cb, s);
815 break;
816 case IDE_DMA_TRIM:
817 s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
818 ide_issue_trim, ide_dma_cb, s,
819 DMA_DIRECTION_TO_DEVICE);
820 break;
821 }
822 return;
823
824 eot:
825 if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
826 block_acct_done(blk_get_stats(s->blk), &s->acct);
827 }
828 ide_set_inactive(s, stay_active);
829 }
830
831 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
832 {
833 s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT;
834 s->io_buffer_size = 0;
835 s->dma_cmd = dma_cmd;
836
837 switch (dma_cmd) {
838 case IDE_DMA_READ:
839 block_acct_start(blk_get_stats(s->blk), &s->acct,
840 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
841 break;
842 case IDE_DMA_WRITE:
843 block_acct_start(blk_get_stats(s->blk), &s->acct,
844 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
845 break;
846 default:
847 break;
848 }
849
850 ide_start_dma(s, ide_dma_cb);
851 }
852
853 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
854 {
855 s->io_buffer_index = 0;
856 s->bus->retry_unit = s->unit;
857 s->bus->retry_sector_num = ide_get_sector(s);
858 s->bus->retry_nsector = s->nsector;
859 if (s->bus->dma->ops->start_dma) {
860 s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
861 }
862 }
863
864 static void ide_sector_write(IDEState *s);
865
866 static void ide_sector_write_timer_cb(void *opaque)
867 {
868 IDEState *s = opaque;
869 ide_set_irq(s->bus);
870 }
871
872 static void ide_sector_write_cb(void *opaque, int ret)
873 {
874 IDEState *s = opaque;
875 int n;
876
877 if (ret == -ECANCELED) {
878 return;
879 }
880
881 s->pio_aiocb = NULL;
882 s->status &= ~BUSY_STAT;
883
884 if (ret != 0) {
885 if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
886 return;
887 }
888 }
889
890 block_acct_done(blk_get_stats(s->blk), &s->acct);
891
892 n = s->nsector;
893 if (n > s->req_nb_sectors) {
894 n = s->req_nb_sectors;
895 }
896 s->nsector -= n;
897
898 ide_set_sector(s, ide_get_sector(s) + n);
899 if (s->nsector == 0) {
900 /* no more sectors to write */
901 ide_transfer_stop(s);
902 } else {
903 int n1 = s->nsector;
904 if (n1 > s->req_nb_sectors) {
905 n1 = s->req_nb_sectors;
906 }
907 ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
908 ide_sector_write);
909 }
910
911 if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
912 /* It seems there is a bug in the Windows 2000 installer HDD
913 IDE driver which fills the disk with empty logs when the
914 IDE write IRQ comes too early. This hack tries to correct
915 that at the expense of slower write performance. Use this
916 option _only_ to install Windows 2000. You must disable it
917 for normal use. */
918 timer_mod(s->sector_write_timer,
919 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000));
920 } else {
921 ide_set_irq(s->bus);
922 }
923 }
924
925 static void ide_sector_write(IDEState *s)
926 {
927 int64_t sector_num;
928 int n;
929
930 s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
931 sector_num = ide_get_sector(s);
932 #if defined(DEBUG_IDE)
933 printf("sector=%" PRId64 "\n", sector_num);
934 #endif
935 n = s->nsector;
936 if (n > s->req_nb_sectors) {
937 n = s->req_nb_sectors;
938 }
939
940 if (!ide_sect_range_ok(s, sector_num, n)) {
941 ide_rw_error(s);
942 block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
943 return;
944 }
945
946 s->iov.iov_base = s->io_buffer;
947 s->iov.iov_len = n * BDRV_SECTOR_SIZE;
948 qemu_iovec_init_external(&s->qiov, &s->iov, 1);
949
950 block_acct_start(blk_get_stats(s->blk), &s->acct,
951 n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
952 s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n,
953 ide_sector_write_cb, s);
954 }
955
956 static void ide_flush_cb(void *opaque, int ret)
957 {
958 IDEState *s = opaque;
959
960 s->pio_aiocb = NULL;
961
962 if (ret == -ECANCELED) {
963 return;
964 }
965 if (ret < 0) {
966 /* XXX: What sector number to set here? */
967 if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
968 return;
969 }
970 }
971
972 if (s->blk) {
973 block_acct_done(blk_get_stats(s->blk), &s->acct);
974 }
975 s->status = READY_STAT | SEEK_STAT;
976 ide_cmd_done(s);
977 ide_set_irq(s->bus);
978 }
979
980 static void ide_flush_cache(IDEState *s)
981 {
982 if (s->blk == NULL) {
983 ide_flush_cb(s, 0);
984 return;
985 }
986
987 s->status |= BUSY_STAT;
988 block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
989 s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
990 }
991
992 static void ide_cfata_metadata_inquiry(IDEState *s)
993 {
994 uint16_t *p;
995 uint32_t spd;
996
997 p = (uint16_t *) s->io_buffer;
998 memset(p, 0, 0x200);
999 spd = ((s->mdata_size - 1) >> 9) + 1;
1000
1001 put_le16(p + 0, 0x0001); /* Data format revision */
1002 put_le16(p + 1, 0x0000); /* Media property: silicon */
1003 put_le16(p + 2, s->media_changed); /* Media status */
1004 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
1005 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
1006 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
1007 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
1008 }
1009
1010 static void ide_cfata_metadata_read(IDEState *s)
1011 {
1012 uint16_t *p;
1013
1014 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1015 s->status = ERR_STAT;
1016 s->error = ABRT_ERR;
1017 return;
1018 }
1019
1020 p = (uint16_t *) s->io_buffer;
1021 memset(p, 0, 0x200);
1022
1023 put_le16(p + 0, s->media_changed); /* Media status */
1024 memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1025 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1026 s->nsector << 9), 0x200 - 2));
1027 }
1028
1029 static void ide_cfata_metadata_write(IDEState *s)
1030 {
1031 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1032 s->status = ERR_STAT;
1033 s->error = ABRT_ERR;
1034 return;
1035 }
1036
1037 s->media_changed = 0;
1038
1039 memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1040 s->io_buffer + 2,
1041 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1042 s->nsector << 9), 0x200 - 2));
1043 }
1044
1045 /* called when the inserted state of the media has changed */
1046 static void ide_cd_change_cb(void *opaque, bool load)
1047 {
1048 IDEState *s = opaque;
1049 uint64_t nb_sectors;
1050
1051 s->tray_open = !load;
1052 blk_get_geometry(s->blk, &nb_sectors);
1053 s->nb_sectors = nb_sectors;
1054
1055 /*
1056 * First indicate to the guest that a CD has been removed. That's
1057 * done on the next command the guest sends us.
1058 *
1059 * Then we set UNIT_ATTENTION, by which the guest will
1060 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1061 */
1062 s->cdrom_changed = 1;
1063 s->events.new_media = true;
1064 s->events.eject_request = false;
1065 ide_set_irq(s->bus);
1066 }
1067
1068 static void ide_cd_eject_request_cb(void *opaque, bool force)
1069 {
1070 IDEState *s = opaque;
1071
1072 s->events.eject_request = true;
1073 if (force) {
1074 s->tray_locked = false;
1075 }
1076 ide_set_irq(s->bus);
1077 }
1078
1079 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1080 {
1081 s->lba48 = lba48;
1082
1083 /* Handle the 'magic' 0 nsector count conversion here. To avoid
1084 * fiddling with the rest of the read logic, we just store the
1085 * full sector count in ->nsector and ignore ->hob_nsector from now on.
1086 */
1087 if (!s->lba48) {
1088 if (!s->nsector)
1089 s->nsector = 256;
1090 } else {
1091 if (!s->nsector && !s->hob_nsector)
1092 s->nsector = 65536;
1093 else {
1094 int lo = s->nsector;
1095 int hi = s->hob_nsector;
1096
1097 s->nsector = (hi << 8) | lo;
1098 }
1099 }
1100 }
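/*
 * Example: in non-LBA48 commands a sector count register of 0 means
 * 256 sectors; in LBA48 commands nsector == hob_nsector == 0 means
 * 65536 sectors, otherwise the count is (hob_nsector << 8) | nsector,
 * so hob_nsector = 0x01, nsector = 0x00 requests 256 sectors.
 */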
1101
1102 static void ide_clear_hob(IDEBus *bus)
1103 {
1104 /* any write clears HOB high bit of device control register */
1105 bus->ifs[0].select &= ~(1 << 7);
1106 bus->ifs[1].select &= ~(1 << 7);
1107 }
1108
1109 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1110 {
1111 IDEBus *bus = opaque;
1112
1113 #ifdef DEBUG_IDE
1114 printf("IDE: write addr=0x%x val=0x%02x\n", addr, val);
1115 #endif
1116
1117 addr &= 7;
1118
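/*
 * addr & 7 selects the ATA task-file register: 0 data, 1 feature
 * (error on read), 2 sector count, 3 sector number / LBA[7:0],
 * 4 cylinder low / LBA[15:8], 5 cylinder high / LBA[23:16],
 * 6 drive/head select, 7 command (status on read).
 */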
1119 /* ignore writes to command block while busy with previous command */
1120 if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT)))
1121 return;
1122
1123 switch(addr) {
1124 case 0:
1125 break;
1126 case 1:
1127 ide_clear_hob(bus);
1128 /* NOTE: data is written to both drives */
1129 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1130 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1131 bus->ifs[0].feature = val;
1132 bus->ifs[1].feature = val;
1133 break;
1134 case 2:
1135 ide_clear_hob(bus);
1136 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1137 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1138 bus->ifs[0].nsector = val;
1139 bus->ifs[1].nsector = val;
1140 break;
1141 case 3:
1142 ide_clear_hob(bus);
1143 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1144 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1145 bus->ifs[0].sector = val;
1146 bus->ifs[1].sector = val;
1147 break;
1148 case 4:
1149 ide_clear_hob(bus);
1150 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1151 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1152 bus->ifs[0].lcyl = val;
1153 bus->ifs[1].lcyl = val;
1154 break;
1155 case 5:
1156 ide_clear_hob(bus);
1157 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1158 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1159 bus->ifs[0].hcyl = val;
1160 bus->ifs[1].hcyl = val;
1161 break;
1162 case 6:
1163 /* FIXME: HOB readback uses bit 7 */
1164 bus->ifs[0].select = (val & ~0x10) | 0xa0;
1165 bus->ifs[1].select = (val | 0x10) | 0xa0;
1166 /* select drive */
1167 bus->unit = (val >> 4) & 1;
1168 break;
1169 default:
1170 case 7:
1171 /* command */
1172 ide_exec_cmd(bus, val);
1173 break;
1174 }
1175 }
1176
1177 static bool cmd_nop(IDEState *s, uint8_t cmd)
1178 {
1179 return true;
1180 }
1181
1182 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1183 {
1184 switch (s->feature) {
1185 case DSM_TRIM:
1186 if (s->blk) {
1187 ide_sector_start_dma(s, IDE_DMA_TRIM);
1188 return false;
1189 }
1190 break;
1191 }
1192
1193 ide_abort_command(s);
1194 return true;
1195 }
1196
1197 static bool cmd_identify(IDEState *s, uint8_t cmd)
1198 {
1199 if (s->blk && s->drive_kind != IDE_CD) {
1200 if (s->drive_kind != IDE_CFATA) {
1201 ide_identify(s);
1202 } else {
1203 ide_cfata_identify(s);
1204 }
1205 s->status = READY_STAT | SEEK_STAT;
1206 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1207 ide_set_irq(s->bus);
1208 return false;
1209 } else {
1210 if (s->drive_kind == IDE_CD) {
1211 ide_set_signature(s);
1212 }
1213 ide_abort_command(s);
1214 }
1215
1216 return true;
1217 }
1218
1219 static bool cmd_verify(IDEState *s, uint8_t cmd)
1220 {
1221 bool lba48 = (cmd == WIN_VERIFY_EXT);
1222
1223 /* do sector number check ? */
1224 ide_cmd_lba48_transform(s, lba48);
1225
1226 return true;
1227 }
1228
1229 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
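/*
 * SET MULTIPLE MODE below accepts a block size of 0 (which disables
 * Read/Write Multiple) or a power of two no larger than
 * MAX_MULT_SECTORS; the "(s->nsector & (s->nsector - 1)) != 0" test
 * rejects any value that is not a power of two.
 */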
1230 {
1231 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1232 /* Disable Read and Write Multiple */
1233 s->mult_sectors = 0;
1234 } else if ((s->nsector & 0xff) != 0 &&
1235 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1236 (s->nsector & (s->nsector - 1)) != 0)) {
1237 ide_abort_command(s);
1238 } else {
1239 s->mult_sectors = s->nsector & 0xff;
1240 }
1241
1242 return true;
1243 }
1244
1245 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1246 {
1247 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1248
1249 if (!s->blk || !s->mult_sectors) {
1250 ide_abort_command(s);
1251 return true;
1252 }
1253
1254 ide_cmd_lba48_transform(s, lba48);
1255 s->req_nb_sectors = s->mult_sectors;
1256 ide_sector_read(s);
1257 return false;
1258 }
1259
1260 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1261 {
1262 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1263 int n;
1264
1265 if (!s->blk || !s->mult_sectors) {
1266 ide_abort_command(s);
1267 return true;
1268 }
1269
1270 ide_cmd_lba48_transform(s, lba48);
1271
1272 s->req_nb_sectors = s->mult_sectors;
1273 n = MIN(s->nsector, s->req_nb_sectors);
1274
1275 s->status = SEEK_STAT | READY_STAT;
1276 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1277
1278 s->media_changed = 1;
1279
1280 return false;
1281 }
1282
1283 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1284 {
1285 bool lba48 = (cmd == WIN_READ_EXT);
1286
1287 if (s->drive_kind == IDE_CD) {
1288 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1289 ide_abort_command(s);
1290 return true;
1291 }
1292
1293 if (!s->blk) {
1294 ide_abort_command(s);
1295 return true;
1296 }
1297
1298 ide_cmd_lba48_transform(s, lba48);
1299 s->req_nb_sectors = 1;
1300 ide_sector_read(s);
1301
1302 return false;
1303 }
1304
1305 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1306 {
1307 bool lba48 = (cmd == WIN_WRITE_EXT);
1308
1309 if (!s->blk) {
1310 ide_abort_command(s);
1311 return true;
1312 }
1313
1314 ide_cmd_lba48_transform(s, lba48);
1315
1316 s->req_nb_sectors = 1;
1317 s->status = SEEK_STAT | READY_STAT;
1318 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1319
1320 s->media_changed = 1;
1321
1322 return false;
1323 }
1324
1325 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1326 {
1327 bool lba48 = (cmd == WIN_READDMA_EXT);
1328
1329 if (!s->blk) {
1330 ide_abort_command(s);
1331 return true;
1332 }
1333
1334 ide_cmd_lba48_transform(s, lba48);
1335 ide_sector_start_dma(s, IDE_DMA_READ);
1336
1337 return false;
1338 }
1339
1340 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1341 {
1342 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1343
1344 if (!s->blk) {
1345 ide_abort_command(s);
1346 return true;
1347 }
1348
1349 ide_cmd_lba48_transform(s, lba48);
1350 ide_sector_start_dma(s, IDE_DMA_WRITE);
1351
1352 s->media_changed = 1;
1353
1354 return false;
1355 }
1356
1357 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1358 {
1359 ide_flush_cache(s);
1360 return false;
1361 }
1362
1363 static bool cmd_seek(IDEState *s, uint8_t cmd)
1364 {
1365 /* XXX: Check that seek is within bounds */
1366 return true;
1367 }
1368
1369 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1370 {
1371 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1372
1373 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1374 if (s->nb_sectors == 0) {
1375 ide_abort_command(s);
1376 return true;
1377 }
1378
1379 ide_cmd_lba48_transform(s, lba48);
1380 ide_set_sector(s, s->nb_sectors - 1);
1381
1382 return true;
1383 }
1384
1385 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1386 {
1387 s->nsector = 0xff; /* device active or idle */
1388 return true;
1389 }
1390
1391 static bool cmd_set_features(IDEState *s, uint8_t cmd)
1392 {
1393 uint16_t *identify_data;
1394
1395 if (!s->blk) {
1396 ide_abort_command(s);
1397 return true;
1398 }
1399
1400 /* XXX: valid for CDROM ? */
1401 switch (s->feature) {
1402 case 0x02: /* write cache enable */
1403 blk_set_enable_write_cache(s->blk, true);
1404 identify_data = (uint16_t *)s->identify_data;
1405 put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
1406 return true;
1407 case 0x82: /* write cache disable */
1408 blk_set_enable_write_cache(s->blk, false);
1409 identify_data = (uint16_t *)s->identify_data;
1410 put_le16(identify_data + 85, (1 << 14) | 1);
1411 ide_flush_cache(s);
1412 return false;
1413 case 0xcc: /* reverting to power-on defaults enable */
1414 case 0x66: /* reverting to power-on defaults disable */
1415 case 0xaa: /* read look-ahead enable */
1416 case 0x55: /* read look-ahead disable */
1417 case 0x05: /* set advanced power management mode */
1418 case 0x85: /* disable advanced power management mode */
1419 case 0x69: /* NOP */
1420 case 0x67: /* NOP */
1421 case 0x96: /* NOP */
1422 case 0x9a: /* NOP */
1423 case 0x42: /* enable Automatic Acoustic Mode */
1424 case 0xc2: /* disable Automatic Acoustic Mode */
1425 return true;
1426 case 0x03: /* set transfer mode */
1427 {
1428 uint8_t val = s->nsector & 0x07;
1429 identify_data = (uint16_t *)s->identify_data;
1430
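/*
 * For SET TRANSFER MODE the sector count register encodes the mode:
 * bits 7:3 select the class (0x00/0x01 PIO, 0x02 single word DMA,
 * 0x04 multiword DMA, 0x08 Ultra DMA) and bits 2:0 the mode number.
 * E.g. nsector = 0x45 selects UDMA mode 5, which sets bit 13 in
 * IDENTIFY word 88 below.
 */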
1431 switch (s->nsector >> 3) {
1432 case 0x00: /* pio default */
1433 case 0x01: /* pio mode */
1434 put_le16(identify_data + 62, 0x07);
1435 put_le16(identify_data + 63, 0x07);
1436 put_le16(identify_data + 88, 0x3f);
1437 break;
1438 case 0x02: /* single word dma mode */
1439 put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
1440 put_le16(identify_data + 63, 0x07);
1441 put_le16(identify_data + 88, 0x3f);
1442 break;
1443 case 0x04: /* mdma mode */
1444 put_le16(identify_data + 62, 0x07);
1445 put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
1446 put_le16(identify_data + 88, 0x3f);
1447 break;
1448 case 0x08: /* udma mode */
1449 put_le16(identify_data + 62, 0x07);
1450 put_le16(identify_data + 63, 0x07);
1451 put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
1452 break;
1453 default:
1454 goto abort_cmd;
1455 }
1456 return true;
1457 }
1458 }
1459
1460 abort_cmd:
1461 ide_abort_command(s);
1462 return true;
1463 }
1464
1465
1466 /*** ATAPI commands ***/
1467
1468 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1469 {
1470 ide_atapi_identify(s);
1471 s->status = READY_STAT | SEEK_STAT;
1472 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1473 ide_set_irq(s->bus);
1474 return false;
1475 }
1476
1477 static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
1478 {
1479 ide_set_signature(s);
1480
1481 if (s->drive_kind == IDE_CD) {
1482 s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
1483 * devices to return a clear status register
1484 * with READY_STAT *not* set. */
1485 s->error = 0x01;
1486 } else {
1487 s->status = READY_STAT | SEEK_STAT;
1488 /* The bits of the error register are not as usual for this command!
1489 * They are part of the regular output (this is why ERR_STAT isn't set)
1490 * Device 0 passed, Device 1 passed or not present. */
1491 s->error = 0x01;
1492 ide_set_irq(s->bus);
1493 }
1494
1495 return false;
1496 }
1497
1498 static bool cmd_device_reset(IDEState *s, uint8_t cmd)
1499 {
1500 ide_set_signature(s);
1501 s->status = 0x00; /* NOTE: READY is _not_ set */
1502 s->error = 0x01;
1503
1504 return false;
1505 }
1506
1507 static bool cmd_packet(IDEState *s, uint8_t cmd)
1508 {
1509 /* overlapping commands not supported */
1510 if (s->feature & 0x02) {
1511 ide_abort_command(s);
1512 return true;
1513 }
1514
1515 s->status = READY_STAT | SEEK_STAT;
1516 s->atapi_dma = s->feature & 1;
1517 s->nsector = 1;
1518 ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1519 ide_atapi_cmd);
1520 return false;
1521 }
1522
1523
1524 /*** CF-ATA commands ***/
1525
1526 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1527 {
1528 s->error = 0x09; /* miscellaneous error */
1529 s->status = READY_STAT | SEEK_STAT;
1530 ide_set_irq(s->bus);
1531
1532 return false;
1533 }
1534
1535 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1536 {
1537 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1538 * required for Windows 8 to work with AHCI */
1539
1540 if (cmd == CFA_WEAR_LEVEL) {
1541 s->nsector = 0;
1542 }
1543
1544 if (cmd == CFA_ERASE_SECTORS) {
1545 s->media_changed = 1;
1546 }
1547
1548 return true;
1549 }
1550
1551 static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
1552 {
1553 s->status = READY_STAT | SEEK_STAT;
1554
1555 memset(s->io_buffer, 0, 0x200);
1556 s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */
1557 s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */
1558 s->io_buffer[0x02] = s->select; /* Head */
1559 s->io_buffer[0x03] = s->sector; /* Sector */
1560 s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */
1561 s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */
1562 s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */
1563 s->io_buffer[0x13] = 0x00; /* Erase flag */
1564 s->io_buffer[0x18] = 0x00; /* Hot count */
1565 s->io_buffer[0x19] = 0x00; /* Hot count */
1566 s->io_buffer[0x1a] = 0x01; /* Hot count */
1567
1568 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1569 ide_set_irq(s->bus);
1570
1571 return false;
1572 }
1573
1574 static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
1575 {
1576 switch (s->feature) {
1577 case 0x02: /* Inquiry Metadata Storage */
1578 ide_cfata_metadata_inquiry(s);
1579 break;
1580 case 0x03: /* Read Metadata Storage */
1581 ide_cfata_metadata_read(s);
1582 break;
1583 case 0x04: /* Write Metadata Storage */
1584 ide_cfata_metadata_write(s);
1585 break;
1586 default:
1587 ide_abort_command(s);
1588 return true;
1589 }
1590
1591 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1592 s->status = 0x00; /* NOTE: READY is _not_ set */
1593 ide_set_irq(s->bus);
1594
1595 return false;
1596 }
1597
1598 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1599 {
1600 switch (s->feature) {
1601 case 0x01: /* sense temperature in device */
1602 s->nsector = 0x50; /* +20 C */
1603 break;
1604 default:
1605 ide_abort_command(s);
1606 return true;
1607 }
1608
1609 return true;
1610 }
1611
1612
1613 /*** SMART commands ***/
1614
1615 static bool cmd_smart(IDEState *s, uint8_t cmd)
1616 {
1617 int n;
1618
1619 if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
1620 goto abort_cmd;
1621 }
1622
1623 if (!s->smart_enabled && s->feature != SMART_ENABLE) {
1624 goto abort_cmd;
1625 }
1626
1627 switch (s->feature) {
1628 case SMART_DISABLE:
1629 s->smart_enabled = 0;
1630 return true;
1631
1632 case SMART_ENABLE:
1633 s->smart_enabled = 1;
1634 return true;
1635
1636 case SMART_ATTR_AUTOSAVE:
1637 switch (s->sector) {
1638 case 0x00:
1639 s->smart_autosave = 0;
1640 break;
1641 case 0xf1:
1642 s->smart_autosave = 1;
1643 break;
1644 default:
1645 goto abort_cmd;
1646 }
1647 return true;
1648
1649 case SMART_STATUS:
1650 if (!s->smart_errors) {
1651 s->hcyl = 0xc2;
1652 s->lcyl = 0x4f;
1653 } else {
1654 s->hcyl = 0x2c;
1655 s->lcyl = 0xf4;
1656 }
1657 return true;
1658
1659 case SMART_READ_THRESH:
1660 memset(s->io_buffer, 0, 0x200);
1661 s->io_buffer[0] = 0x01; /* smart struct version */
1662
1663 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1664 s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
1665 s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
1666 }
1667
1668 /* checksum */
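/* byte 511 is set to the two's complement of the sum of bytes 0..510,
   so the whole 512-byte SMART sector sums to zero modulo 256; the same
   scheme is used for the other SMART data blocks built below */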
1669 for (n = 0; n < 511; n++) {
1670 s->io_buffer[511] += s->io_buffer[n];
1671 }
1672 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1673
1674 s->status = READY_STAT | SEEK_STAT;
1675 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1676 ide_set_irq(s->bus);
1677 return false;
1678
1679 case SMART_READ_DATA:
1680 memset(s->io_buffer, 0, 0x200);
1681 s->io_buffer[0] = 0x01; /* smart struct version */
1682
1683 for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
1684 int i;
1685 for (i = 0; i < 11; i++) {
1686 s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
1687 }
1688 }
1689
1690 s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
1691 if (s->smart_selftest_count == 0) {
1692 s->io_buffer[363] = 0;
1693 } else {
1694 s->io_buffer[363] =
1695 s->smart_selftest_data[3 +
1696 (s->smart_selftest_count - 1) *
1697 24];
1698 }
1699 s->io_buffer[364] = 0x20;
1700 s->io_buffer[365] = 0x01;
1701 /* offline data collection capacity: execute + self-test */
1702 s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
1703 s->io_buffer[368] = 0x03; /* smart capability (1) */
1704 s->io_buffer[369] = 0x00; /* smart capability (2) */
1705 s->io_buffer[370] = 0x01; /* error logging supported */
1706 s->io_buffer[372] = 0x02; /* minutes for poll short test */
1707 s->io_buffer[373] = 0x36; /* minutes for poll ext test */
1708 s->io_buffer[374] = 0x01; /* minutes for poll conveyance */
1709
1710 for (n = 0; n < 511; n++) {
1711 s->io_buffer[511] += s->io_buffer[n];
1712 }
1713 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1714
1715 s->status = READY_STAT | SEEK_STAT;
1716 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1717 ide_set_irq(s->bus);
1718 return false;
1719
1720 case SMART_READ_LOG:
1721 switch (s->sector) {
1722 case 0x01: /* summary smart error log */
1723 memset(s->io_buffer, 0, 0x200);
1724 s->io_buffer[0] = 0x01;
1725 s->io_buffer[1] = 0x00; /* no error entries */
1726 s->io_buffer[452] = s->smart_errors & 0xff;
1727 s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;
1728
1729 for (n = 0; n < 511; n++) {
1730 s->io_buffer[511] += s->io_buffer[n];
1731 }
1732 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1733 break;
1734 case 0x06: /* smart self test log */
1735 memset(s->io_buffer, 0, 0x200);
1736 s->io_buffer[0] = 0x01;
1737 if (s->smart_selftest_count == 0) {
1738 s->io_buffer[508] = 0;
1739 } else {
1740 s->io_buffer[508] = s->smart_selftest_count;
1741 for (n = 2; n < 506; n++) {
1742 s->io_buffer[n] = s->smart_selftest_data[n];
1743 }
1744 }
1745
1746 for (n = 0; n < 511; n++) {
1747 s->io_buffer[511] += s->io_buffer[n];
1748 }
1749 s->io_buffer[511] = 0x100 - s->io_buffer[511];
1750 break;
1751 default:
1752 goto abort_cmd;
1753 }
1754 s->status = READY_STAT | SEEK_STAT;
1755 ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
1756 ide_set_irq(s->bus);
1757 return false;
1758
1759 case SMART_EXECUTE_OFFLINE:
1760 switch (s->sector) {
1761 case 0: /* off-line routine */
1762 case 1: /* short self test */
1763 case 2: /* extended self test */
1764 s->smart_selftest_count++;
1765 if (s->smart_selftest_count > 21) {
1766 s->smart_selftest_count = 1;
1767 }
1768 n = 2 + (s->smart_selftest_count - 1) * 24;
1769 s->smart_selftest_data[n] = s->sector;
1770 s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
1771 s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
1772 s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
1773 break;
1774 default:
1775 goto abort_cmd;
1776 }
1777 return true;
1778 }
1779
1780 abort_cmd:
1781 ide_abort_command(s);
1782 return true;
1783 }
1784
1785 #define HD_OK (1u << IDE_HD)
1786 #define CD_OK (1u << IDE_CD)
1787 #define CFA_OK (1u << IDE_CFATA)
1788 #define HD_CFA_OK (HD_OK | CFA_OK)
1789 #define ALL_OK (HD_OK | CD_OK | CFA_OK)
1790
1791 /* Set the Disk Seek Completed status bit during completion */
1792 #define SET_DSC (1u << 8)
1793
1794 /* See ACS-2 T13/2015-D Table B.2 Command codes */
1795 static const struct {
1796 /* Returns true if the completion code should be run */
1797 bool (*handler)(IDEState *s, uint8_t cmd);
1798 int flags;
1799 } ide_cmd_table[0x100] = {
1800 /* NOP not implemented, mandatory for CD */
1801 [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK },
1802 [WIN_DSM] = { cmd_data_set_management, HD_CFA_OK },
1803 [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK },
1804 [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC},
1805 [WIN_READ] = { cmd_read_pio, ALL_OK },
1806 [WIN_READ_ONCE] = { cmd_read_pio, HD_CFA_OK },
1807 [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK },
1808 [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK },
1809 [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1810 [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK },
1811 [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK },
1812 [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK },
1813 [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK },
1814 [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK },
1815 [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK },
1816 [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK },
1817 [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK },
1818 [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC },
1819 [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC },
1820 [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC },
1821 [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC },
1822 [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK },
1823 [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK },
1824 [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC },
1825 [WIN_STANDBYNOW2] = { cmd_nop, HD_CFA_OK },
1826 [WIN_IDLEIMMEDIATE2] = { cmd_nop, HD_CFA_OK },
1827 [WIN_STANDBY2] = { cmd_nop, HD_CFA_OK },
1828 [WIN_SETIDLE2] = { cmd_nop, HD_CFA_OK },
1829 [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1830 [WIN_SLEEPNOW2] = { cmd_nop, HD_CFA_OK },
1831 [WIN_PACKETCMD] = { cmd_packet, CD_OK },
1832 [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK },
1833 [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC },
1834 [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
1835 [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
1836 [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK },
1837 [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK },
1838 [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
1839 [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK },
1840 [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK },
1841 [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK },
1842 [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK },
1843 [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK },
1844 [WIN_STANDBYNOW1] = { cmd_nop, HD_CFA_OK },
1845 [WIN_IDLEIMMEDIATE] = { cmd_nop, HD_CFA_OK },
1846 [WIN_STANDBY] = { cmd_nop, HD_CFA_OK },
1847 [WIN_SETIDLE1] = { cmd_nop, HD_CFA_OK },
1848 [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
1849 [WIN_SLEEPNOW1] = { cmd_nop, HD_CFA_OK },
1850 [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK },
1851 [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK },
1852 [WIN_IDENTIFY] = { cmd_identify, ALL_OK },
1853 [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC },
1854 [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
1855 [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
1856 [WIN_READ_NATIVE_MAX] = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
1857 };
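/*
 * The table is indexed by the command byte in ide_exec_cmd() below.
 * ide_cmd_permitted() checks the drive-kind bits of .flags, and
 * SET_DSC asks ide_exec_cmd() to raise the Disk Seek Completed status
 * bit when the handler completes without error.
 */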
1858
1859 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
1860 {
1861 return cmd < ARRAY_SIZE(ide_cmd_table)
1862 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
1863 }
1864
1865 void ide_exec_cmd(IDEBus *bus, uint32_t val)
1866 {
1867 IDEState *s;
1868 bool complete;
1869
1870 #if defined(DEBUG_IDE)
1871 printf("ide: CMD=%02x\n", val);
1872 #endif
1873 s = idebus_active_if(bus);
1874 /* ignore commands to non-existent slave */
1875 if (s != bus->ifs && !s->blk) {
1876 return;
1877 }
1878
1879 /* Only DEVICE RESET is allowed while BSY and/or DRQ are set */
1880 if ((s->status & (BUSY_STAT|DRQ_STAT)) && val != WIN_DEVICE_RESET)
1881 return;
1882
1883 if (!ide_cmd_permitted(s, val)) {
1884 ide_abort_command(s);
1885 ide_set_irq(s->bus);
1886 return;
1887 }
1888
1889 s->status = READY_STAT | BUSY_STAT;
1890 s->error = 0;
1891 s->io_buffer_offset = 0;
1892
1893 complete = ide_cmd_table[val].handler(s, val);
1894 if (complete) {
1895 s->status &= ~BUSY_STAT;
1896 assert(!!s->error == !!(s->status & ERR_STAT));
1897
1898 if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
1899 s->status |= SEEK_STAT;
1900 }
1901
1902 ide_cmd_done(s);
1903 ide_set_irq(s->bus);
1904 }
1905 }
1906
1907 uint32_t ide_ioport_read(void *opaque, uint32_t addr1)
1908 {
1909 IDEBus *bus = opaque;
1910 IDEState *s = idebus_active_if(bus);
1911 uint32_t addr;
1912 int ret, hob;
1913
1914 addr = addr1 & 7;
1915 /* FIXME: HOB readback uses bit 7, but it's always set right now */
1916 //hob = s->select & (1 << 7);
1917 hob = 0;
1918 switch(addr) {
1919 case 0:
1920 ret = 0xff;
1921 break;
1922 case 1:
1923 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1924 (s != bus->ifs && !s->blk)) {
1925 ret = 0;
1926 } else if (!hob) {
1927 ret = s->error;
1928 } else {
1929 ret = s->hob_feature;
1930 }
1931 break;
1932 case 2:
1933 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1934 ret = 0;
1935 } else if (!hob) {
1936 ret = s->nsector & 0xff;
1937 } else {
1938 ret = s->hob_nsector;
1939 }
1940 break;
1941 case 3:
1942 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1943 ret = 0;
1944 } else if (!hob) {
1945 ret = s->sector;
1946 } else {
1947 ret = s->hob_sector;
1948 }
1949 break;
1950 case 4:
1951 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1952 ret = 0;
1953 } else if (!hob) {
1954 ret = s->lcyl;
1955 } else {
1956 ret = s->hob_lcyl;
1957 }
1958 break;
1959 case 5:
1960 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1961 ret = 0;
1962 } else if (!hob) {
1963 ret = s->hcyl;
1964 } else {
1965 ret = s->hob_hcyl;
1966 }
1967 break;
1968 case 6:
1969 if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
1970 ret = 0;
1971 } else {
1972 ret = s->select;
1973 }
1974 break;
1975 default:
1976 case 7:
1977 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1978 (s != bus->ifs && !s->blk)) {
1979 ret = 0;
1980 } else {
1981 ret = s->status;
1982 }
1983 qemu_irq_lower(bus->irq);
1984 break;
1985 }
1986 #ifdef DEBUG_IDE
1987 printf("ide: read addr=0x%x val=%02x\n", addr1, ret);
1988 #endif
1989 return ret;
1990 }
1991
1992 uint32_t ide_status_read(void *opaque, uint32_t addr)
1993 {
1994 IDEBus *bus = opaque;
1995 IDEState *s = idebus_active_if(bus);
1996 int ret;
1997
1998 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
1999 (s != bus->ifs && !s->blk)) {
2000 ret = 0;
2001 } else {
2002 ret = s->status;
2003 }
2004 #ifdef DEBUG_IDE
2005 printf("ide: read status addr=0x%x val=%02x\n", addr, ret);
2006 #endif
2007 return ret;
2008 }
2009
2010 void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val)
2011 {
2012 IDEBus *bus = opaque;
2013 IDEState *s;
2014 int i;
2015
2016 #ifdef DEBUG_IDE
2017 printf("ide: write control addr=0x%x val=%02x\n", addr, val);
2018 #endif
2019 /* common for both drives */
2020 if (!(bus->cmd & IDE_CMD_RESET) &&
2021 (val & IDE_CMD_RESET)) {
2022 /* reset low to high */
2023 for (i = 0; i < 2; i++) {
2024 s = &bus->ifs[i];
2025 s->status = BUSY_STAT | SEEK_STAT;
2026 s->error = 0x01;
2027 }
2028 } else if ((bus->cmd & IDE_CMD_RESET) &&
2029 !(val & IDE_CMD_RESET)) {
2030 /* high to low */
2031 for (i = 0; i < 2; i++) {
2032 s = &bus->ifs[i];
2033 if (s->drive_kind == IDE_CD)
2034 s->status = 0x00; /* NOTE: READY is _not_ set */
2035 else
2036 s->status = READY_STAT | SEEK_STAT;
2037 ide_set_signature(s);
2038 }
2039 }
2040
2041 bus->cmd = val;
2042 }
2043
2044 /*
2045 * Returns true if the running PIO transfer is a PIO out (i.e., data is
2046 * transferred from the device to the guest), false if it's a PIO in.
2047 */
2048 static bool ide_is_pio_out(IDEState *s)
2049 {
2050 if (s->end_transfer_func == ide_sector_write ||
2051 s->end_transfer_func == ide_atapi_cmd) {
2052 return false;
2053 } else if (s->end_transfer_func == ide_sector_read ||
2054 s->end_transfer_func == ide_transfer_stop ||
2055 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2056 s->end_transfer_func == ide_dummy_transfer_stop) {
2057 return true;
2058 }
2059
2060 abort();
2061 }
2062
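/*
 * The four ide_data_* handlers below implement the 16- and 32-bit PIO
 * data port.  s->data_ptr and s->data_end bracket the current transfer
 * window inside s->io_buffer; every access copies one word, advances
 * data_ptr, and once the window is exhausted clears DRQ and invokes
 * s->end_transfer_func, which either refills the window for the next
 * chunk or terminates the command.  Accesses without DRQ set, or in the
 * wrong direction for the running transfer, are ignored (writes) or
 * return 0 (reads).
 */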
2063 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2064 {
2065 IDEBus *bus = opaque;
2066 IDEState *s = idebus_active_if(bus);
2067 uint8_t *p;
2068
2069 /* PIO data access is allowed only when the DRQ bit is set. The result of
2070 * a write during PIO out is indeterminate, so just ignore it. */
2071 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2072 return;
2073 }
2074
2075 p = s->data_ptr;
2076 if (p + 2 > s->data_end) {
2077 return;
2078 }
2079
2080 *(uint16_t *)p = le16_to_cpu(val);
2081 p += 2;
2082 s->data_ptr = p;
2083 if (p >= s->data_end) {
2084 s->status &= ~DRQ_STAT;
2085 s->end_transfer_func(s);
2086 }
2087 }
2088
2089 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2090 {
2091 IDEBus *bus = opaque;
2092 IDEState *s = idebus_active_if(bus);
2093 uint8_t *p;
2094 int ret;
2095
2096 /* PIO data access is allowed only when the DRQ bit is set. The result of
2097 * a read during PIO in is indeterminate, so return 0 and don't move forward. */
2098 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2099 return 0;
2100 }
2101
2102 p = s->data_ptr;
2103 if (p + 2 > s->data_end) {
2104 return 0;
2105 }
2106
2107 ret = cpu_to_le16(*(uint16_t *)p);
2108 p += 2;
2109 s->data_ptr = p;
2110 if (p >= s->data_end) {
2111 s->status &= ~DRQ_STAT;
2112 s->end_transfer_func(s);
2113 }
2114 return ret;
2115 }
2116
2117 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2118 {
2119 IDEBus *bus = opaque;
2120 IDEState *s = idebus_active_if(bus);
2121 uint8_t *p;
2122
2123 /* PIO data access is allowed only when the DRQ bit is set. The result of
2124 * a write during PIO out is indeterminate, so just ignore it. */
2125 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2126 return;
2127 }
2128
2129 p = s->data_ptr;
2130 if (p + 4 > s->data_end) {
2131 return;
2132 }
2133
2134 *(uint32_t *)p = le32_to_cpu(val);
2135 p += 4;
2136 s->data_ptr = p;
2137 if (p >= s->data_end) {
2138 s->status &= ~DRQ_STAT;
2139 s->end_transfer_func(s);
2140 }
2141 }
2142
2143 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2144 {
2145 IDEBus *bus = opaque;
2146 IDEState *s = idebus_active_if(bus);
2147 uint8_t *p;
2148 int ret;
2149
2150 /* PIO data access is allowed only when the DRQ bit is set. The result of
2151 * a read during PIO in is indeterminate, so return 0 and don't move forward. */
2152 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2153 return 0;
2154 }
2155
2156 p = s->data_ptr;
2157 if (p + 4 > s->data_end) {
2158 return 0;
2159 }
2160
2161 ret = cpu_to_le32(*(uint32_t *)p);
2162 p += 4;
2163 s->data_ptr = p;
2164 if (p >= s->data_end) {
2165 s->status &= ~DRQ_STAT;
2166 s->end_transfer_func(s);
2167 }
2168 return ret;
2169 }
2170
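/*
 * ide_dummy_transfer_stop() parks the PIO pointers on an empty window at
 * the start of io_buffer and fills the first four bytes with 0xff, so a
 * guest that pokes the data port while no transfer is active either gets
 * all-ones or is rejected outright by the DRQ/bounds checks above.
 */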
2171 static void ide_dummy_transfer_stop(IDEState *s)
2172 {
2173 s->data_ptr = s->io_buffer;
2174 s->data_end = s->io_buffer;
2175 s->io_buffer[0] = 0xff;
2176 s->io_buffer[1] = 0xff;
2177 s->io_buffer[2] = 0xff;
2178 s->io_buffer[3] = 0xff;
2179 }
2180
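/*
 * ide_reset() returns a single drive to its power-on register state:
 * any in-flight PIO aiocb is cancelled, the multiple-sector setting is
 * restored (0 for CFATA, MAX_MULT_SECTORS otherwise), the task file,
 * LBA48 HOB registers, ATAPI state and DMA bookkeeping are cleared, the
 * signature is rewritten, and the dummy transfer handler is installed.
 */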
2181 static void ide_reset(IDEState *s)
2182 {
2183 #ifdef DEBUG_IDE
2184 printf("ide: reset\n");
2185 #endif
2186
2187 if (s->pio_aiocb) {
2188 blk_aio_cancel(s->pio_aiocb);
2189 s->pio_aiocb = NULL;
2190 }
2191
2192 if (s->drive_kind == IDE_CFATA)
2193 s->mult_sectors = 0;
2194 else
2195 s->mult_sectors = MAX_MULT_SECTORS;
2196 /* ide regs */
2197 s->feature = 0;
2198 s->error = 0;
2199 s->nsector = 0;
2200 s->sector = 0;
2201 s->lcyl = 0;
2202 s->hcyl = 0;
2203
2204 /* lba48 */
2205 s->hob_feature = 0;
2206 s->hob_sector = 0;
2207 s->hob_nsector = 0;
2208 s->hob_lcyl = 0;
2209 s->hob_hcyl = 0;
2210
2211 s->select = 0xa0;
2212 s->status = READY_STAT | SEEK_STAT;
2213
2214 s->lba48 = 0;
2215
2216 /* ATAPI specific */
2217 s->sense_key = 0;
2218 s->asc = 0;
2219 s->cdrom_changed = 0;
2220 s->packet_transfer_size = 0;
2221 s->elementary_transfer_size = 0;
2222 s->io_buffer_index = 0;
2223 s->cd_sector_size = 0;
2224 s->atapi_dma = 0;
2225 s->tray_locked = 0;
2226 s->tray_open = 0;
2227 /* ATA DMA state */
2228 s->io_buffer_size = 0;
2229 s->req_nb_sectors = 0;
2230
2231 ide_set_signature(s);
2232 /* init the transfer handler so that 0xffff is returned on data
2233 accesses */
2234 s->end_transfer_func = ide_dummy_transfer_stop;
2235 ide_dummy_transfer_stop(s);
2236 s->media_changed = 0;
2237 }
2238
2239 void ide_bus_reset(IDEBus *bus)
2240 {
2241 bus->unit = 0;
2242 bus->cmd = 0;
2243 ide_reset(&bus->ifs[0]);
2244 ide_reset(&bus->ifs[1]);
2245 ide_clear_hob(bus);
2246
2247 /* pending async DMA */
2248 if (bus->dma->aiocb) {
2249 #ifdef DEBUG_AIO
2250 printf("aio_cancel\n");
2251 #endif
2252 blk_aio_cancel(bus->dma->aiocb);
2253 bus->dma->aiocb = NULL;
2254 }
2255
2256 /* reset dma provider too */
2257 if (bus->dma->ops->reset) {
2258 bus->dma->ops->reset(bus->dma);
2259 }
2260 }
2261
2262 static bool ide_cd_is_tray_open(void *opaque)
2263 {
2264 return ((IDEState *)opaque)->tray_open;
2265 }
2266
2267 static bool ide_cd_is_medium_locked(void *opaque)
2268 {
2269 return ((IDEState *)opaque)->tray_locked;
2270 }
2271
2272 static void ide_resize_cb(void *opaque)
2273 {
2274 IDEState *s = opaque;
2275 uint64_t nb_sectors;
2276
2277 if (!s->identify_set) {
2278 return;
2279 }
2280
2281 blk_get_geometry(s->blk, &nb_sectors);
2282 s->nb_sectors = nb_sectors;
2283
2284 /* Update the identify data buffer. */
2285 if (s->drive_kind == IDE_CFATA) {
2286 ide_cfata_identify_size(s);
2287 } else {
2288 /* IDE_CD uses a different set of callbacks entirely. */
2289 assert(s->drive_kind != IDE_CD);
2290 ide_identify_size(s);
2291 }
2292 }
2293
2294 static const BlockDevOps ide_cd_block_ops = {
2295 .change_media_cb = ide_cd_change_cb,
2296 .eject_request_cb = ide_cd_eject_request_cb,
2297 .is_tray_open = ide_cd_is_tray_open,
2298 .is_medium_locked = ide_cd_is_medium_locked,
2299 };
2300
2301 static const BlockDevOps ide_hd_block_ops = {
2302 .resize_cb = ide_resize_cb,
2303 };
2304
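/*
 * ide_init_drive() binds a BlockBackend to an IDEState.  Hard disks and
 * CFATA drives must have writable, inserted media; CD-ROMs get the
 * removable-media callbacks and a 2048-byte guest block size.  Missing
 * serial, model and version strings fall back to a generated "QM%05d"
 * serial, a kind-specific model string and qemu_hw_version().  Returns 0
 * on success and -1 (after error_report) on failure.
 *
 * A hypothetical call for a hard disk (sketch only; the chs_trans value
 * and variable names are illustrative, not taken from a real caller):
 *
 *     if (ide_init_drive(s, blk, IDE_HD, NULL, NULL, NULL, 0,
 *                        cyls, heads, secs, chs_trans) < 0) {
 *         return -1;
 *     }
 */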
2305 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2306 const char *version, const char *serial, const char *model,
2307 uint64_t wwn,
2308 uint32_t cylinders, uint32_t heads, uint32_t secs,
2309 int chs_trans)
2310 {
2311 uint64_t nb_sectors;
2312
2313 s->blk = blk;
2314 s->drive_kind = kind;
2315
2316 blk_get_geometry(blk, &nb_sectors);
2317 s->cylinders = cylinders;
2318 s->heads = heads;
2319 s->sectors = secs;
2320 s->chs_trans = chs_trans;
2321 s->nb_sectors = nb_sectors;
2322 s->wwn = wwn;
2323 /* The SMART values should be preserved across power cycles
2324 but they aren't. */
2325 s->smart_enabled = 1;
2326 s->smart_autosave = 1;
2327 s->smart_errors = 0;
2328 s->smart_selftest_count = 0;
2329 if (kind == IDE_CD) {
2330 blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2331 blk_set_guest_block_size(blk, 2048);
2332 } else {
2333 if (!blk_is_inserted(s->blk)) {
2334 error_report("Device needs media, but drive is empty");
2335 return -1;
2336 }
2337 if (blk_is_read_only(blk)) {
2338 error_report("Can't use a read-only drive");
2339 return -1;
2340 }
2341 blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2342 }
2343 if (serial) {
2344 pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2345 } else {
2346 snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2347 "QM%05d", s->drive_serial);
2348 }
2349 if (model) {
2350 pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2351 } else {
2352 switch (kind) {
2353 case IDE_CD:
2354 strcpy(s->drive_model_str, "QEMU DVD-ROM");
2355 break;
2356 case IDE_CFATA:
2357 strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2358 break;
2359 default:
2360 strcpy(s->drive_model_str, "QEMU HARDDISK");
2361 break;
2362 }
2363 }
2364
2365 if (version) {
2366 pstrcpy(s->version, sizeof(s->version), version);
2367 } else {
2368 pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2369 }
2370
2371 ide_reset(s);
2372 blk_iostatus_enable(blk);
2373 return 0;
2374 }
2375
2376 static void ide_init1(IDEBus *bus, int unit)
2377 {
2378 static int drive_serial = 1;
2379 IDEState *s = &bus->ifs[unit];
2380
2381 s->bus = bus;
2382 s->unit = unit;
2383 s->drive_serial = drive_serial++;
2384 /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
2385 s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
2386 s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
2387 memset(s->io_buffer, 0, s->io_buffer_total_len);
2388
2389 s->smart_selftest_data = blk_blockalign(s->blk, 512);
2390 memset(s->smart_selftest_data, 0, 512);
2391
2392 s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2393 ide_sector_write_timer_cb, s);
2394 }
2395
2396 static int ide_nop_int(IDEDMA *dma, int x)
2397 {
2398 return 0;
2399 }
2400
2401 static void ide_nop(IDEDMA *dma)
2402 {
2403 }
2404
2405 static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
2406 {
2407 return 0;
2408 }
2409
2410 static const IDEDMAOps ide_dma_nop_ops = {
2411 .prepare_buf = ide_nop_int32,
2412 .restart_dma = ide_nop,
2413 .rw_buf = ide_nop_int,
2414 };
2415
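/*
 * The restart machinery below re-runs a request whose failure stopped
 * the VM (typically under a werror/rerror=stop policy).  When the VM
 * resumes, ide_restart_cb() schedules ide_restart_bh() in a bottom half;
 * the BH decodes bus->error_status, clears it, and re-issues the DMA
 * (read, write or TRIM), PIO or flush operation, optionally kicking the
 * HBA first via dma->ops->restart when IDE_RETRY_HBA is set.
 */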
2416 static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
2417 {
2418 s->unit = s->bus->retry_unit;
2419 ide_set_sector(s, s->bus->retry_sector_num);
2420 s->nsector = s->bus->retry_nsector;
2421 s->bus->dma->ops->restart_dma(s->bus->dma);
2422 s->io_buffer_size = 0;
2423 s->dma_cmd = dma_cmd;
2424 ide_start_dma(s, ide_dma_cb);
2425 }
2426
2427 static void ide_restart_bh(void *opaque)
2428 {
2429 IDEBus *bus = opaque;
2430 IDEState *s;
2431 bool is_read;
2432 int error_status;
2433
2434 qemu_bh_delete(bus->bh);
2435 bus->bh = NULL;
2436
2437 error_status = bus->error_status;
2438 if (bus->error_status == 0) {
2439 return;
2440 }
2441
2442 s = idebus_active_if(bus);
2443 is_read = (bus->error_status & IDE_RETRY_READ) != 0;
2444
2445 /* The error status must be cleared before resubmitting the request: the
2446 * request may fail again, and this case can only be distinguished if the
2447 * called function can set a new error status. */
2448 bus->error_status = 0;
2449
2450 /* The HBA has generically asked to be kicked on retry */
2451 if (error_status & IDE_RETRY_HBA) {
2452 if (s->bus->dma->ops->restart) {
2453 s->bus->dma->ops->restart(s->bus->dma);
2454 }
2455 }
2456
2457 if (error_status & IDE_RETRY_DMA) {
2458 if (error_status & IDE_RETRY_TRIM) {
2459 ide_restart_dma(s, IDE_DMA_TRIM);
2460 } else {
2461 ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
2462 }
2463 } else if (error_status & IDE_RETRY_PIO) {
2464 if (is_read) {
2465 ide_sector_read(s);
2466 } else {
2467 ide_sector_write(s);
2468 }
2469 } else if (error_status & IDE_RETRY_FLUSH) {
2470 ide_flush_cache(s);
2471 } else {
2472 /*
2473 * We don't have any bits that tell us this was an ATAPI request,
2474 * but we do have the end_transfer_func, which tells us what
2475 * we're trying to do.
2476 */
2477 if (s->end_transfer_func == ide_atapi_cmd) {
2478 ide_atapi_dma_restart(s);
2479 }
2480 }
2481 }
2482
2483 static void ide_restart_cb(void *opaque, int running, RunState state)
2484 {
2485 IDEBus *bus = opaque;
2486
2487 if (!running)
2488 return;
2489
2490 if (!bus->bh) {
2491 bus->bh = qemu_bh_new(ide_restart_bh, bus);
2492 qemu_bh_schedule(bus->bh);
2493 }
2494 }
2495
2496 void ide_register_restart_cb(IDEBus *bus)
2497 {
2498 if (bus->dma->ops->restart_dma) {
2499 qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2500 }
2501 }
2502
2503 static IDEDMA ide_dma_nop = {
2504 .ops = &ide_dma_nop_ops,
2505 .aiocb = NULL,
2506 };
2507
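/*
 * ide_init2() brings up both interfaces of a bus, wires the shared IRQ
 * line and installs the no-op DMA provider; controllers that implement
 * busmaster DMA are expected to replace bus->dma afterwards.
 */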
2508 void ide_init2(IDEBus *bus, qemu_irq irq)
2509 {
2510 int i;
2511
2512 for (i = 0; i < 2; i++) {
2513 ide_init1(bus, i);
2514 ide_reset(&bus->ifs[i]);
2515 }
2516 bus->irq = irq;
2517 bus->dma = &ide_dma_nop;
2518 }
2519
2520 static const MemoryRegionPortio ide_portio_list[] = {
2521 { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write },
2522 { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew },
2523 { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel },
2524 PORTIO_END_OF_LIST(),
2525 };
2526
2527 static const MemoryRegionPortio ide_portio2_list[] = {
2528 { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write },
2529 PORTIO_END_OF_LIST(),
2530 };
2531
2532 void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2)
2533 {
2534 /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA
2535 bridge has been set up properly to always register with ISA. */
2536 isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide");
2537
2538 if (iobase2) {
2539 isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide");
2540 }
2541 }
2542
2543 static bool is_identify_set(void *opaque, int version_id)
2544 {
2545 IDEState *s = opaque;
2546
2547 return s->identify_set != 0;
2548 }
2549
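/*
 * end_transfer_func is a function pointer and cannot be migrated as-is,
 * so the PIO vmstate saves an index into transfer_end_table instead:
 * ide_drive_pio_pre_save() converts the pointer to an index (falling
 * back to ide_transfer_stop for unknown values) and
 * ide_drive_pio_post_load() converts it back, rejecting out-of-range
 * indices with -EINVAL.
 */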
2550 static EndTransferFunc* transfer_end_table[] = {
2551 ide_sector_read,
2552 ide_sector_write,
2553 ide_transfer_stop,
2554 ide_atapi_cmd_reply_end,
2555 ide_atapi_cmd,
2556 ide_dummy_transfer_stop,
2557 };
2558
2559 static int transfer_end_table_idx(EndTransferFunc *fn)
2560 {
2561 int i;
2562
2563 for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2564 if (transfer_end_table[i] == fn)
2565 return i;
2566
2567 return -1;
2568 }
2569
2570 static int ide_drive_post_load(void *opaque, int version_id)
2571 {
2572 IDEState *s = opaque;
2573
2574 if (s->blk && s->identify_set) {
2575 blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2576 }
2577 return 0;
2578 }
2579
2580 static int ide_drive_pio_post_load(void *opaque, int version_id)
2581 {
2582 IDEState *s = opaque;
2583
2584 if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
2585 return -EINVAL;
2586 }
2587 s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
2588 s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
2589 s->data_end = s->data_ptr + s->cur_io_buffer_len;
2590 s->atapi_dma = s->feature & 1; /* as per cmd_packet */
2591
2592 return 0;
2593 }
2594
2595 static void ide_drive_pio_pre_save(void *opaque)
2596 {
2597 IDEState *s = opaque;
2598 int idx;
2599
2600 s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
2601 s->cur_io_buffer_len = s->data_end - s->data_ptr;
2602
2603 idx = transfer_end_table_idx(s->end_transfer_func);
2604 if (idx == -1) {
2605 fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
2606 __func__);
2607 s->end_transfer_fn_idx = 2;
2608 } else {
2609 s->end_transfer_fn_idx = idx;
2610 }
2611 }
2612
2613 static bool ide_drive_pio_state_needed(void *opaque)
2614 {
2615 IDEState *s = opaque;
2616
2617 return ((s->status & DRQ_STAT) != 0)
2618 || (s->bus->error_status & IDE_RETRY_PIO);
2619 }
2620
2621 static bool ide_tray_state_needed(void *opaque)
2622 {
2623 IDEState *s = opaque;
2624
2625 return s->tray_open || s->tray_locked;
2626 }
2627
2628 static bool ide_atapi_gesn_needed(void *opaque)
2629 {
2630 IDEState *s = opaque;
2631
2632 return s->events.new_media || s->events.eject_request;
2633 }
2634
2635 static bool ide_error_needed(void *opaque)
2636 {
2637 IDEBus *bus = opaque;
2638
2639 return (bus->error_status != 0);
2640 }
2641
2642 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2643 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2644 .name = "ide_drive/atapi/gesn_state",
2645 .version_id = 1,
2646 .minimum_version_id = 1,
2647 .needed = ide_atapi_gesn_needed,
2648 .fields = (VMStateField[]) {
2649 VMSTATE_BOOL(events.new_media, IDEState),
2650 VMSTATE_BOOL(events.eject_request, IDEState),
2651 VMSTATE_END_OF_LIST()
2652 }
2653 };
2654
2655 static const VMStateDescription vmstate_ide_tray_state = {
2656 .name = "ide_drive/tray_state",
2657 .version_id = 1,
2658 .minimum_version_id = 1,
2659 .needed = ide_tray_state_needed,
2660 .fields = (VMStateField[]) {
2661 VMSTATE_BOOL(tray_open, IDEState),
2662 VMSTATE_BOOL(tray_locked, IDEState),
2663 VMSTATE_END_OF_LIST()
2664 }
2665 };
2666
2667 static const VMStateDescription vmstate_ide_drive_pio_state = {
2668 .name = "ide_drive/pio_state",
2669 .version_id = 1,
2670 .minimum_version_id = 1,
2671 .pre_save = ide_drive_pio_pre_save,
2672 .post_load = ide_drive_pio_post_load,
2673 .needed = ide_drive_pio_state_needed,
2674 .fields = (VMStateField[]) {
2675 VMSTATE_INT32(req_nb_sectors, IDEState),
2676 VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
2677 vmstate_info_uint8, uint8_t),
2678 VMSTATE_INT32(cur_io_buffer_offset, IDEState),
2679 VMSTATE_INT32(cur_io_buffer_len, IDEState),
2680 VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
2681 VMSTATE_INT32(elementary_transfer_size, IDEState),
2682 VMSTATE_INT32(packet_transfer_size, IDEState),
2683 VMSTATE_END_OF_LIST()
2684 }
2685 };
2686
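/*
 * vmstate_ide_drive is at version 3 but still accepts streams back to
 * version 0; cdrom_changed is only transferred from version 3 on.  The
 * PIO, tray and GESN event state live in subsections that are emitted
 * only when their .needed callbacks report something worth sending.
 */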
2687 const VMStateDescription vmstate_ide_drive = {
2688 .name = "ide_drive",
2689 .version_id = 3,
2690 .minimum_version_id = 0,
2691 .post_load = ide_drive_post_load,
2692 .fields = (VMStateField[]) {
2693 VMSTATE_INT32(mult_sectors, IDEState),
2694 VMSTATE_INT32(identify_set, IDEState),
2695 VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
2696 VMSTATE_UINT8(feature, IDEState),
2697 VMSTATE_UINT8(error, IDEState),
2698 VMSTATE_UINT32(nsector, IDEState),
2699 VMSTATE_UINT8(sector, IDEState),
2700 VMSTATE_UINT8(lcyl, IDEState),
2701 VMSTATE_UINT8(hcyl, IDEState),
2702 VMSTATE_UINT8(hob_feature, IDEState),
2703 VMSTATE_UINT8(hob_sector, IDEState),
2704 VMSTATE_UINT8(hob_nsector, IDEState),
2705 VMSTATE_UINT8(hob_lcyl, IDEState),
2706 VMSTATE_UINT8(hob_hcyl, IDEState),
2707 VMSTATE_UINT8(select, IDEState),
2708 VMSTATE_UINT8(status, IDEState),
2709 VMSTATE_UINT8(lba48, IDEState),
2710 VMSTATE_UINT8(sense_key, IDEState),
2711 VMSTATE_UINT8(asc, IDEState),
2712 VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
2713 VMSTATE_END_OF_LIST()
2714 },
2715 .subsections = (const VMStateDescription*[]) {
2716 &vmstate_ide_drive_pio_state,
2717 &vmstate_ide_tray_state,
2718 &vmstate_ide_atapi_gesn_state,
2719 NULL
2720 }
2721 };
2722
2723 static const VMStateDescription vmstate_ide_error_status = {
2724 .name = "ide_bus/error",
2725 .version_id = 2,
2726 .minimum_version_id = 1,
2727 .needed = ide_error_needed,
2728 .fields = (VMStateField[]) {
2729 VMSTATE_INT32(error_status, IDEBus),
2730 VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2731 VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2732 VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2733 VMSTATE_END_OF_LIST()
2734 }
2735 };
2736
2737 const VMStateDescription vmstate_ide_bus = {
2738 .name = "ide_bus",
2739 .version_id = 1,
2740 .minimum_version_id = 1,
2741 .fields = (VMStateField[]) {
2742 VMSTATE_UINT8(cmd, IDEBus),
2743 VMSTATE_UINT8(unit, IDEBus),
2744 VMSTATE_END_OF_LIST()
2745 },
2746 .subsections = (const VMStateDescription*[]) {
2747 &vmstate_ide_error_status,
2748 NULL
2749 }
2750 };
2751
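/*
 * ide_drive_get() fills hd[0..n-1] with the DriveInfo configured for
 * each IDE drive index and exits with an error if the command line
 * defines more IDE buses than the caller's array can describe.
 *
 * Typical usage (sketch; array sizing is illustrative and assumes the
 * usual MAX_IDE_BUS/MAX_IDE_DEVS constants are available):
 *
 *     DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
 *     ide_drive_get(hd, ARRAY_SIZE(hd));
 */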
2752 void ide_drive_get(DriveInfo **hd, int n)
2753 {
2754 int i;
2755 int highest_bus = drive_get_max_bus(IF_IDE) + 1;
2756 int max_devs = drive_get_max_devs(IF_IDE);
2757 int n_buses = max_devs ? (n / max_devs) : n;
2758
2759 /*
2760 * Note: The number of actual buses available is not known.
2761 * We compute this based on the size of the DriveInfo* array, n.
2762 * If it is less than max_devs * <num_real_buses>,
2763 * we will stop looking for drives prematurely instead of overfilling
2764 * the array.
2765 */
2766
2767 if (highest_bus > n_buses) {
2768 error_report("Too many IDE buses defined (%d > %d)",
2769 highest_bus, n_buses);
2770 exit(1);
2771 }
2772
2773 for (i = 0; i < n; i++) {
2774 hd[i] = drive_get_by_index(IF_IDE, i);
2775 }
2776 }