]> git.proxmox.com Git - mirror_qemu.git/blob - hw/ide/core.c
hw/arm/virt: Disable LPA2 for -machine virt-6.2
[mirror_qemu.git] / hw / ide / core.c
1 /*
2 * QEMU IDE disk and CD/DVD-ROM Emulator
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25
26 #include "qemu/osdep.h"
27 #include "hw/isa/isa.h"
28 #include "migration/vmstate.h"
29 #include "qemu/error-report.h"
30 #include "qemu/main-loop.h"
31 #include "qemu/timer.h"
32 #include "qemu/hw-version.h"
33 #include "qemu/memalign.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/blockdev.h"
36 #include "sysemu/dma.h"
37 #include "hw/block/block.h"
38 #include "sysemu/block-backend.h"
39 #include "qapi/error.h"
40 #include "qemu/cutils.h"
41 #include "sysemu/replay.h"
42 #include "sysemu/runstate.h"
43 #include "hw/ide/internal.h"
44 #include "trace.h"
45
/* These values were based on a Seagate ST3500418AS but have been modified
   to make more sense in QEMU */
/*
 * Static SMART attribute table returned for SMART READ DATA.  Each row is
 * one 12-byte attribute record: id, flags, hflags, current value, worst
 * value, 6 raw data bytes, and the failure threshold.
 */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate*/
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
65
/* Printable names for enum ide_dma_cmd, used by tracing. */
const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
    [IDE_DMA_READ] = "DMA READ",
    [IDE_DMA_WRITE] = "DMA WRITE",
    [IDE_DMA_TRIM] = "DMA TRIM",
    [IDE_DMA_ATAPI] = "DMA ATAPI"
};
72
73 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
74 {
75 if ((unsigned)enval < IDE_DMA__COUNT) {
76 return IDE_DMA_CMD_lookup[enval];
77 }
78 return "DMA UNKNOWN CMD";
79 }
80
81 static void ide_dummy_transfer_stop(IDEState *s);
82
/*
 * Fill 'str' with 'len' bytes from the NUL-terminated 'src', padding with
 * spaces once 'src' is exhausted.  Adjacent byte pairs are swapped (i^1)
 * as required by the ATA IDENTIFY string encoding.  The destination is
 * not NUL-terminated.
 */
static void padstr(char *str, const char *src, int len)
{
    int i;

    for (i = 0; i < len; i++) {
        int c = *src ? *src++ : ' ';
        str[i ^ 1] = c;
    }
}
94
/* Store 'v' at '*p' in little-endian byte order (ATA data words are LE). */
static void put_le16(uint16_t *p, unsigned int v)
{
    *p = cpu_to_le16(v);
}
99
100 static void ide_identify_size(IDEState *s)
101 {
102 uint16_t *p = (uint16_t *)s->identify_data;
103 int64_t nb_sectors_lba28 = s->nb_sectors;
104 if (nb_sectors_lba28 >= 1 << 28) {
105 nb_sectors_lba28 = (1 << 28) - 1;
106 }
107 put_le16(p + 60, nb_sectors_lba28);
108 put_le16(p + 61, nb_sectors_lba28 >> 16);
109 put_le16(p + 100, s->nb_sectors);
110 put_le16(p + 101, s->nb_sectors >> 16);
111 put_le16(p + 102, s->nb_sectors >> 32);
112 put_le16(p + 103, s->nb_sectors >> 48);
113 }
114
/*
 * Build the 512-byte ATA IDENTIFY DEVICE response for a hard disk and copy
 * it into s->io_buffer.  The image is cached in s->identify_data and only
 * regenerated when s->identify_set is clear (e.g. after a geometry change);
 * otherwise the cached copy is reused via the fill_buffer path.
 * Word numbers below refer to the ATA IDENTIFY DEVICE layout.
 */
static void ide_identify(IDEState *s)
{
    uint16_t *p;
    unsigned int oldsize;
    /* Device properties live on the IDEDevice for this unit, if plugged. */
    IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    put_le16(p + 0, 0x0040); /* fixed (non-removable) ATA device */
    put_le16(p + 1, s->cylinders);
    put_le16(p + 3, s->heads);
    put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
    put_le16(p + 5, 512); /* XXX: retired, remove ? */
    put_le16(p + 6, s->sectors);
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* XXX: retired, remove ? */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#endif
    put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
    put_le16(p + 51, 0x200); /* PIO transfer cycle */
    put_le16(p + 52, 0x200); /* DMA transfer cycle */
    put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
    put_le16(p + 54, s->cylinders);
    put_le16(p + 55, s->heads);
    put_le16(p + 56, s->sectors);
    /* current capacity in sectors from the current CHS translation */
    oldsize = s->cylinders * s->heads * s->sectors;
    put_le16(p + 57, oldsize);
    put_le16(p + 58, oldsize >> 16);
    if (s->mult_sectors)
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
    put_le16(p + 62, 0x07); /* single word dma0-2 supported */
    put_le16(p + 63, 0x07); /* mdma0-2 supported */
    put_le16(p + 64, 0x03); /* pio3-4 supported */
    put_le16(p + 65, 120); /* min mdma cycle time (ns) */
    put_le16(p + 66, 120); /* recommended mdma cycle time (ns) */
    put_le16(p + 67, 120); /* min PIO cycle time without flow control (ns) */
    put_le16(p + 68, 120); /* min PIO cycle time with IORDY (ns) */
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
    }

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1); /* queue depth - 1 */
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
    put_le16(p + 81, 0x16); /* conforms to ata5 */
    /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
    put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 84, (1 << 14) | 0);
    }
    /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
    if (blk_enable_write_cache(s->blk)) {
        put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
    } else {
        put_le16(p + 85, (1 << 14) | 1);
    }
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 87, (1 << 14) | 0);
    }
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    put_le16(p + 93, 1 | (1 << 14) | 0x2000); /* hardware reset result */
    /* *(p + 100) := nb_sectors       -- see ide_identify_size */
    /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
    /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
    /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */

    if (dev && dev->conf.physical_block_size)
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 169, 1); /* TRIM support */
    }
    if (dev) {
        put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
    }

    ide_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
229
/*
 * Build the ATAPI IDENTIFY PACKET DEVICE response for a CD/DVD drive and
 * copy it into s->io_buffer.  Like ide_identify(), the image is cached in
 * s->identify_data and reused until s->identify_set is cleared.
 */
static void ide_atapi_identify(IDEState *s)
{
    uint16_t *p;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    /* Removable CDROM, 50us response, 12 byte packets */
    put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* buffer type */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
    put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
#ifdef USE_DMA_CDROM
    put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
    put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
    put_le16(p + 62, 7); /* single word dma0-2 supported */
    put_le16(p + 63, 7); /* mdma0-2 supported */
#else
    put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
    put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
    put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
#endif
    put_le16(p + 64, 3); /* pio3-4 supported */
    put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
    put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
    put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
    put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */

    put_le16(p + 71, 30); /* in ns */
    put_le16(p + 72, 30); /* in ns */

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1); /* queue depth - 1 */
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
    if (s->wwn) {
        put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
        put_le16(p + 87, (1 << 8)); /* WWN enabled */
    }

#ifdef USE_DMA_CDROM
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
#endif

    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }

    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
297
298 static void ide_cfata_identify_size(IDEState *s)
299 {
300 uint16_t *p = (uint16_t *)s->identify_data;
301 put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */
302 put_le16(p + 8, s->nb_sectors); /* Sectors per card */
303 put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */
304 put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
305 }
306
/*
 * Build the IDENTIFY response for a CompactFlash (CFATA) card and copy it
 * into s->io_buffer.  Cached in s->identify_data like ide_identify().
 */
static void ide_cfata_identify(IDEState *s)
{
    uint16_t *p;
    uint32_t cur_sec;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    /* capacity implied by the current CHS translation */
    cur_sec = s->cylinders * s->heads * s->sectors;

    put_le16(p + 0, 0x848a);            /* CF Storage Card signature */
    put_le16(p + 1, s->cylinders);      /* Default cylinders */
    put_le16(p + 3, s->heads);          /* Default heads */
    put_le16(p + 6, s->sectors);        /* Default sectors per track */
    /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 22, 0x0004);           /* ECC bytes */
    padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */
    padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#else
    put_le16(p + 47, 0x0000);
#endif
    put_le16(p + 49, 0x0f00);           /* Capabilities */
    put_le16(p + 51, 0x0002);           /* PIO cycle timing mode */
    put_le16(p + 52, 0x0001);           /* DMA cycle timing mode */
    put_le16(p + 53, 0x0003);           /* Translation params valid */
    put_le16(p + 54, s->cylinders);     /* Current cylinders */
    put_le16(p + 55, s->heads);         /* Current heads */
    put_le16(p + 56, s->sectors);       /* Current sectors */
    put_le16(p + 57, cur_sec);          /* Current capacity */
    put_le16(p + 58, cur_sec >> 16);    /* Current capacity */
    if (s->mult_sectors)                /* Multiple sector setting */
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    put_le16(p + 63, 0x0203);           /* Multiword DMA capability */
    put_le16(p + 64, 0x0001);           /* Flow Control PIO support */
    put_le16(p + 65, 0x0096);           /* Min. Multiword DMA cycle */
    put_le16(p + 66, 0x0096);           /* Rec. Multiword DMA cycle */
    put_le16(p + 68, 0x00b4);           /* Min. PIO cycle time */
    put_le16(p + 82, 0x400c);           /* Command Set supported */
    put_le16(p + 83, 0x7068);           /* Command Set supported */
    put_le16(p + 84, 0x4000);           /* Features supported */
    put_le16(p + 85, 0x000c);           /* Command Set enabled */
    put_le16(p + 86, 0x7044);           /* Command Set enabled */
    put_le16(p + 87, 0x4000);           /* Features enabled */
    put_le16(p + 91, 0x4060);           /* Current APM level */
    put_le16(p + 129, 0x0002);          /* Current features option */
    put_le16(p + 130, 0x0005);          /* Reassigned sectors */
    put_le16(p + 131, 0x0001);          /* Initial power mode */
    put_le16(p + 132, 0x0000);          /* User signature */
    put_le16(p + 160, 0x8100);          /* Power requirement */
    put_le16(p + 161, 0x8001);          /* CF command set */

    ide_cfata_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
373
374 static void ide_set_signature(IDEState *s)
375 {
376 s->select &= ~(ATA_DEV_HS); /* clear head */
377 /* put signature */
378 s->nsector = 1;
379 s->sector = 1;
380 if (s->drive_kind == IDE_CD) {
381 s->lcyl = 0x14;
382 s->hcyl = 0xeb;
383 } else if (s->blk) {
384 s->lcyl = 0;
385 s->hcyl = 0;
386 } else {
387 s->lcyl = 0xff;
388 s->hcyl = 0xff;
389 }
390 }
391
392 static bool ide_sect_range_ok(IDEState *s,
393 uint64_t sector, uint64_t nb_sectors)
394 {
395 uint64_t total_sectors;
396
397 blk_get_geometry(s->blk, &total_sectors);
398 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
399 return false;
400 }
401 return true;
402 }
403
/* State of one guest TRIM command, processed range entry by range entry. */
typedef struct TrimAIOCB {
    BlockAIOCB common;   /* embedded public AIOCB handle */
    IDEState *s;         /* device the TRIM runs on */
    QEMUBH *bh;          /* bottom half delivering the final completion */
    int ret;             /* accumulated status: 0, -errno or -ECANCELED */
    QEMUIOVector *qiov;  /* guest-supplied list of 8-byte range entries */
    BlockAIOCB *aiocb;   /* in-flight discard request, if any */
    int i, j;            /* cursor: entry i within iovec j */
} TrimAIOCB;
413
/*
 * Asynchronous-cancel hook for a TRIM command: park the range cursor on
 * the last entry so ide_issue_trim_cb() finds no further work, record
 * -ECANCELED as the final status, and cancel any discard in flight.
 */
static void trim_aio_cancel(BlockAIOCB *acb)
{
    TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);

    /* Exit the loop so ide_issue_trim_cb will not continue */
    iocb->j = iocb->qiov->niov - 1;
    iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;

    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    }
}
429
/* AIOCB descriptor for TRIM requests; supports asynchronous cancel. */
static const AIOCBInfo trim_aiocb_info = {
    .aiocb_size         = sizeof(TrimAIOCB),
    .cancel_async       = trim_aio_cancel,
};
434
435 static void ide_trim_bh_cb(void *opaque)
436 {
437 TrimAIOCB *iocb = opaque;
438
439 iocb->common.cb(iocb->common.opaque, iocb->ret);
440
441 qemu_bh_delete(iocb->bh);
442 iocb->bh = NULL;
443 qemu_aio_unref(iocb);
444 }
445
/*
 * Process the next TRIM range entry.  This function is both the starting
 * point (called from ide_issue_trim() with ret == 0) and the completion
 * callback of each blk_aio_pdiscard() it submits, so it resumes the
 * nested (j, i) loop from the stored cursor each time.  The final result
 * is delivered through iocb->bh (see ide_trim_bh_cb).
 */
static void ide_issue_trim_cb(void *opaque, int ret)
{
    TrimAIOCB *iocb = opaque;
    IDEState *s = iocb->s;

    /* i >= 0 means a discard submitted earlier just completed: account it. */
    if (iocb->i >= 0) {
        if (ret >= 0) {
            block_acct_done(blk_get_stats(s->blk), &s->acct);
        } else {
            block_acct_failed(blk_get_stats(s->blk), &s->acct);
        }
    }

    if (ret >= 0) {
        while (iocb->j < iocb->qiov->niov) {
            int j = iocb->j;
            while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
                int i = iocb->i;
                uint64_t *buffer = iocb->qiov->iov[j].iov_base;

                /* 6-byte LBA + 2-byte range per entry */
                uint64_t entry = le64_to_cpu(buffer[i]);
                uint64_t sector = entry & 0x0000ffffffffffffULL;
                uint16_t count = entry >> 48;

                /* a zero-length range is a no-op entry */
                if (count == 0) {
                    continue;
                }

                if (!ide_sect_range_ok(s, sector, count)) {
                    block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
                    iocb->ret = -EINVAL;
                    goto done;
                }

                block_acct_start(blk_get_stats(s->blk), &s->acct,
                                 count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);

                /* Got an entry! Submit and exit.  */
                iocb->aiocb = blk_aio_pdiscard(s->blk,
                                               sector << BDRV_SECTOR_BITS,
                                               count << BDRV_SECTOR_BITS,
                                               ide_issue_trim_cb, opaque);
                return;
            }

            /* this iovec is exhausted: move to the next one */
            iocb->j++;
            iocb->i = -1;
        }
    } else {
        iocb->ret = ret;
    }

done:
    /* All entries processed (or error/cancel): schedule final completion. */
    iocb->aiocb = NULL;
    if (iocb->bh) {
        replay_bh_schedule_event(iocb->bh);
    }
}
505
506 BlockAIOCB *ide_issue_trim(
507 int64_t offset, QEMUIOVector *qiov,
508 BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
509 {
510 IDEState *s = opaque;
511 TrimAIOCB *iocb;
512
513 iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
514 iocb->s = s;
515 iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
516 iocb->ret = 0;
517 iocb->qiov = qiov;
518 iocb->i = -1;
519 iocb->j = 0;
520 ide_issue_trim_cb(iocb, 0);
521 return &iocb->common;
522 }
523
524 void ide_abort_command(IDEState *s)
525 {
526 ide_transfer_stop(s);
527 s->status = READY_STAT | ERR_STAT;
528 s->error = ABRT_ERR;
529 }
530
531 static void ide_set_retry(IDEState *s)
532 {
533 s->bus->retry_unit = s->unit;
534 s->bus->retry_sector_num = ide_get_sector(s);
535 s->bus->retry_nsector = s->nsector;
536 }
537
538 static void ide_clear_retry(IDEState *s)
539 {
540 s->bus->retry_unit = -1;
541 s->bus->retry_sector_num = 0;
542 s->bus->retry_nsector = 0;
543 }
544
545 /* prepare data transfer and tell what to do after */
546 bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
547 EndTransferFunc *end_transfer_func)
548 {
549 s->data_ptr = buf;
550 s->data_end = buf + size;
551 ide_set_retry(s);
552 if (!(s->status & ERR_STAT)) {
553 s->status |= DRQ_STAT;
554 }
555 if (!s->bus->dma->ops->pio_transfer) {
556 s->end_transfer_func = end_transfer_func;
557 return false;
558 }
559 s->bus->dma->ops->pio_transfer(s->bus->dma);
560 return true;
561 }
562
563 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
564 EndTransferFunc *end_transfer_func)
565 {
566 if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
567 end_transfer_func(s);
568 }
569 }
570
571 static void ide_cmd_done(IDEState *s)
572 {
573 if (s->bus->dma->ops->cmd_done) {
574 s->bus->dma->ops->cmd_done(s->bus->dma);
575 }
576 }
577
/*
 * Park the PIO transfer state machine: point the data window at an empty
 * range, route any further end-of-transfer back to ide_transfer_stop,
 * and drop DRQ.  Does not notify the DMA backend (see ide_transfer_stop).
 */
static void ide_transfer_halt(IDEState *s)
{
    s->end_transfer_func = ide_transfer_stop;
    s->data_ptr = s->io_buffer;
    s->data_end = s->io_buffer;
    s->status &= ~DRQ_STAT;
}
585
/* Halt the PIO transfer and tell the DMA backend the command is done. */
void ide_transfer_stop(IDEState *s)
{
    ide_transfer_halt(s);
    ide_cmd_done(s);
}
591
/*
 * Decode the current sector number from the task-file registers,
 * honouring the active addressing mode: LBA48 (hob_* registers hold the
 * high bytes), LBA28 (low nibble of 'select' supplies bits 24-27), or
 * CHS translation.
 */
int64_t ide_get_sector(IDEState *s)
{
    int64_t sector_num;
    if (s->select & (ATA_DEV_LBA)) {
        if (s->lba48) {
            sector_num = ((int64_t)s->hob_hcyl << 40) |
                ((int64_t) s->hob_lcyl << 32) |
                ((int64_t) s->hob_sector << 24) |
                ((int64_t) s->hcyl << 16) |
                ((int64_t) s->lcyl << 8) | s->sector;
        } else {
            /* LBA28 */
            sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) |
                (s->hcyl << 16) | (s->lcyl << 8) | s->sector;
        }
    } else {
        /* CHS */
        sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
            (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1);
    }

    return sector_num;
}
615
/*
 * Encode 'sector_num' back into the task-file registers, using the same
 * addressing mode (LBA48 / LBA28 / CHS) that ide_get_sector() decodes.
 */
void ide_set_sector(IDEState *s, int64_t sector_num)
{
    unsigned int cyl, r;
    if (s->select & (ATA_DEV_LBA)) {
        if (s->lba48) {
            s->sector = sector_num;
            s->lcyl = sector_num >> 8;
            s->hcyl = sector_num >> 16;
            s->hob_sector = sector_num >> 24;
            s->hob_lcyl = sector_num >> 32;
            s->hob_hcyl = sector_num >> 40;
        } else {
            /* LBA28: bits 24-27 go into the low nibble of 'select' */
            s->select = (s->select & ~(ATA_DEV_LBA_MSB)) |
                ((sector_num >> 24) & (ATA_DEV_LBA_MSB));
            s->hcyl = (sector_num >> 16);
            s->lcyl = (sector_num >> 8);
            s->sector = (sector_num);
        }
    } else {
        /* CHS: split into cylinder, head and 1-based sector */
        cyl = sector_num / (s->heads * s->sectors);
        r = sector_num % (s->heads * s->sectors);
        s->hcyl = cyl >> 8;
        s->lcyl = cyl;
        s->select = (s->select & ~(ATA_DEV_HS)) |
            ((r / s->sectors) & (ATA_DEV_HS));
        s->sector = (r % s->sectors) + 1;
    }
}
646
647 static void ide_rw_error(IDEState *s) {
648 ide_abort_command(s);
649 ide_set_irq(s->bus);
650 }
651
/*
 * Completion callback for ide_buffered_readv(): copy the bounce buffer
 * into the caller's original iovec and forward the status — unless the
 * request was orphaned by ide_cancel_dma_sync(), in which case the caller
 * was already notified with -ECANCELED and must not be touched again.
 * Always frees the bounce buffer and the request itself.
 */
static void ide_buffered_readv_cb(void *opaque, int ret)
{
    IDEBufferedRequest *req = opaque;
    if (!req->orphaned) {
        if (!ret) {
            assert(req->qiov.size == req->original_qiov->size);
            qemu_iovec_from_buf(req->original_qiov, 0,
                                req->qiov.local_iov.iov_base,
                                req->original_qiov->size);
        }
        req->original_cb(req->original_opaque, ret);
    }
    QLIST_REMOVE(req, list);
    qemu_vfree(qemu_iovec_buf(&req->qiov));
    g_free(req);
}
668
669 #define MAX_BUFFERED_REQS 16
670
671 BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
672 QEMUIOVector *iov, int nb_sectors,
673 BlockCompletionFunc *cb, void *opaque)
674 {
675 BlockAIOCB *aioreq;
676 IDEBufferedRequest *req;
677 int c = 0;
678
679 QLIST_FOREACH(req, &s->buffered_requests, list) {
680 c++;
681 }
682 if (c > MAX_BUFFERED_REQS) {
683 return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
684 }
685
686 req = g_new0(IDEBufferedRequest, 1);
687 req->original_qiov = iov;
688 req->original_cb = cb;
689 req->original_opaque = opaque;
690 qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
691 iov->size);
692
693 aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
694 &req->qiov, 0, ide_buffered_readv_cb, req);
695
696 QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
697 return aioreq;
698 }
699
/**
 * Cancel all pending DMA requests.
 * Any buffered DMA requests are instantly canceled,
 * but any pending unbuffered DMA requests must be waited on.
 */
void ide_cancel_dma_sync(IDEState *s)
{
    IDEBufferedRequest *req;

    /* First invoke the callbacks of all buffered requests
     * and flag those requests as orphaned. Ideally there
     * are no unbuffered (Scatter Gather DMA Requests or
     * write requests) pending and we can avoid to drain. */
    QLIST_FOREACH(req, &s->buffered_requests, list) {
        if (!req->orphaned) {
            trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
            /* the callback sees -ECANCELED; completion later is a no-op */
            req->original_cb(req->original_opaque, -ECANCELED);
        }
        req->orphaned = true;
    }

    /*
     * We can't cancel Scatter Gather DMA in the middle of the
     * operation or a partial (not full) DMA transfer would reach
     * the storage so we wait for completion instead (we behave
     * like if the DMA was completed by the time the guest trying
     * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
     * set).
     *
     * In the future we'll be able to safely cancel the I/O if the
     * whole DMA operation will be submitted to disk with a single
     * aio operation with preadv/pwritev.
     */
    if (s->bus->dma->aiocb) {
        trace_ide_cancel_dma_sync_remaining();
        blk_drain(s->blk);
        assert(s->bus->dma->aiocb == NULL);
    }
}
739
740 static void ide_sector_read(IDEState *s);
741
/*
 * Completion callback for a PIO sector read: advance the current sector,
 * hand the filled io_buffer to the guest for draining, and raise the IRQ.
 * On error, defers to the drive's rerror policy via ide_handle_rw_error().
 */
static void ide_sector_read_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
                                IDE_RETRY_READ)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    /* at most req_nb_sectors were read into io_buffer this round */
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    ide_set_sector(s, ide_get_sector(s) + n);
    s->nsector -= n;
    /* Allow the guest to read the io_buffer */
    ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
    ide_set_irq(s->bus);
}
770
/*
 * Start (or continue) a PIO sector read of up to req_nb_sectors sectors
 * into io_buffer.  When nsector reaches zero the transfer is stopped.
 * The actual read is submitted through the buffered path so it can be
 * canceled safely (see ide_buffered_readv).
 */
static void ide_sector_read(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT;
    s->error = 0; /* not needed by IDE spec, but needed by Windows */
    sector_num = ide_get_sector(s);
    n = s->nsector;

    if (n == 0) {
        ide_transfer_stop(s);
        return;
    }

    s->status |= BUSY_STAT;

    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_read(sector_num, n);

    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
        return;
    }

    qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
    s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
                                      ide_sector_read_cb, s);
}
807
/*
 * Tell the DMA backend that tx_bytes of the current scatter/gather list
 * have been consumed, advance the buffer offset accordingly, and release
 * the sglist.  tx_bytes == 0 discards the sglist without progress.
 */
void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
{
    if (s->bus->dma->ops->commit_buf) {
        s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
    }
    s->io_buffer_offset += tx_bytes;
    qemu_sglist_destroy(&s->sg);
}
816
/*
 * Mark the DMA engine idle: clear the in-flight AIOCB and retry state,
 * let the backend update its status ('more' keeps its Active bit set
 * when the PRDs described more data than the request used), and signal
 * command completion.
 */
void ide_set_inactive(IDEState *s, bool more)
{
    s->bus->dma->aiocb = NULL;
    ide_clear_retry(s);
    if (s->bus->dma->ops->set_inactive) {
        s->bus->dma->ops->set_inactive(s->bus->dma, more);
    }
    ide_cmd_done(s);
}
826
/*
 * Fail the current DMA command: discard the sglist without progress,
 * latch the abort error, deactivate the DMA engine and raise the IRQ.
 */
void ide_dma_error(IDEState *s)
{
    dma_buf_commit(s, 0);
    ide_abort_command(s);
    ide_set_inactive(s, false);
    ide_set_irq(s->bus);
}
834
/*
 * Apply the drive's configured error policy to a failed request.
 * 'error' is a positive errno value; 'op' is a mask of IDE_RETRY_* flags
 * describing the failed operation.  Returns nonzero unless the policy is
 * to ignore the error (in which case the caller should just continue).
 */
int ide_handle_rw_error(IDEState *s, int error, int op)
{
    bool is_read = (op & IDE_RETRY_READ) != 0;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* remember the failed op so it can be retried on VM resume */
        assert(s->bus->retry_unit == s->unit);
        s->bus->error_status = op;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
        if (IS_IDE_RETRY_DMA(op)) {
            ide_dma_error(s);
        } else if (IS_IDE_RETRY_ATAPI(op)) {
            ide_atapi_io_error(s, -error);
        } else {
            ide_rw_error(s);
        }
    }
    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
856
/*
 * DMA engine state machine.  Called once to start a transfer (via
 * ide_sector_start_dma -> ide_start_dma) and again as the completion
 * callback of each dma_blk_* chunk it submits.  Each round commits the
 * previous sglist, advances the sector registers, then either finishes
 * (nsector == 0) or maps the next run of PRDs and resubmits.
 */
static void ide_dma_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;
    int64_t sector_num;
    uint64_t offset;
    bool stay_active = false;
    int32_t prep_size = 0;

    if (ret == -EINVAL) {
        ide_dma_error(s);
        return;
    }

    if (ret < 0) {
        if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
            s->bus->dma->aiocb = NULL;
            dma_buf_commit(s, 0);
            return;
        }
    }

    if (s->io_buffer_size > s->nsector * 512) {
        /*
         * The PRDs were longer than needed for this request.
         * The Active bit must remain set after the request completes.
         */
        n = s->nsector;
        stay_active = true;
    } else {
        n = s->io_buffer_size >> 9;
    }

    sector_num = ide_get_sector(s);
    if (n > 0) {
        assert(n * 512 == s->sg.size);
        dma_buf_commit(s, s->sg.size);
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    /* end of transfer ? */
    if (s->nsector == 0) {
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        goto eot;
    }

    /* launch next transfer */
    n = s->nsector;
    s->io_buffer_index = 0;
    s->io_buffer_size = n * 512;
    prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size);
    /* prepare_buf() must succeed and respect the limit */
    assert(prep_size >= 0 && prep_size <= n * 512);

    /*
     * Now prep_size stores the number of bytes in the sglist, and
     * s->io_buffer_size stores the number of bytes described by the PRDs.
     */

    if (prep_size < n * 512) {
        /*
         * The PRDs are too short for this request. Error condition!
         * Reset the Active bit and don't raise the interrupt.
         */
        s->status = READY_STAT | SEEK_STAT;
        dma_buf_commit(s, 0);
        goto eot;
    }

    trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));

    if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
        !ide_sect_range_ok(s, sector_num, n)) {
        ide_dma_error(s);
        block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
        return;
    }

    offset = sector_num << BDRV_SECTOR_BITS;
    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
                                          BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_WRITE:
        s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_TRIM:
        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
                                        &s->sg, offset, BDRV_SECTOR_SIZE,
                                        ide_issue_trim, s, ide_dma_cb, s,
                                        DMA_DIRECTION_TO_DEVICE);
        break;
    default:
        /* ATAPI DMA is handled elsewhere; any other value is a bug. */
        abort();
    }
    return;

eot:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    ide_set_inactive(s, stay_active);
}
965
966 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
967 {
968 s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
969 s->io_buffer_size = 0;
970 s->dma_cmd = dma_cmd;
971
972 switch (dma_cmd) {
973 case IDE_DMA_READ:
974 block_acct_start(blk_get_stats(s->blk), &s->acct,
975 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
976 break;
977 case IDE_DMA_WRITE:
978 block_acct_start(blk_get_stats(s->blk), &s->acct,
979 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
980 break;
981 default:
982 break;
983 }
984
985 ide_start_dma(s, ide_dma_cb);
986 }
987
988 void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
989 {
990 s->io_buffer_index = 0;
991 ide_set_retry(s);
992 if (s->bus->dma->ops->start_dma) {
993 s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
994 }
995 }
996
997 static void ide_sector_write(IDEState *s);
998
/* Timer callback for the win2k install hack: deliver the delayed write IRQ. */
static void ide_sector_write_timer_cb(void *opaque)
{
    IDEState *s = opaque;
    ide_set_irq(s->bus);
}
1004
/*
 * Completion callback for a PIO sector write: advance the sector
 * registers, either stop the transfer (all sectors written) or ask the
 * guest for the next chunk, then raise the IRQ — possibly delayed via
 * sector_write_timer when the win2k install hack is enabled.
 */
static void ide_sector_write_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    /* at most req_nb_sectors were written from io_buffer this round */
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }
    s->nsector -= n;

    ide_set_sector(s, ide_get_sector(s) + n);
    if (s->nsector == 0) {
        /* no more sectors to write */
        ide_transfer_stop(s);
    } else {
        int n1 = s->nsector;
        if (n1 > s->req_nb_sectors) {
            n1 = s->req_nb_sectors;
        }
        ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
                           ide_sector_write);
    }

    if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performances. Use this
           option _only_ to install Windows 2000. You must disable it
           for normal use. */
        timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (NANOSECONDS_PER_SECOND / 1000));
    } else {
        ide_set_irq(s->bus);
    }
}
1053
/*
 * Start an asynchronous PIO sector write of up to req_nb_sectors
 * sectors from io_buffer at the current device LBA.  Completion is
 * handled by ide_sector_write_cb().
 */
static void ide_sector_write(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
    sector_num = ide_get_sector(s);

    /* Write at most req_nb_sectors sectors per batch. */
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_write(sector_num, n);

    if (!ide_sect_range_ok(s, sector_num, n)) {
        /* Out-of-range access: fail the command without touching the disk. */
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
        return;
    }

    qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
    s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
                                   &s->qiov, 0, ide_sector_write_cb, s);
}
1082
/*
 * Completion callback for FLUSH CACHE.  Also called directly by
 * ide_flush_cache() with ret == 0 when there is no backend (s->blk may
 * be NULL here, hence the check before block_acct_done()).
 */
static void ide_flush_cb(void *opaque, int ret)
{
    IDEState *s = opaque;

    s->pio_aiocb = NULL;

    if (ret < 0) {
        /* XXX: What sector number to set here? */
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
            return;
        }
    }

    if (s->blk) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    s->status = READY_STAT | SEEK_STAT;
    ide_cmd_done(s);
    ide_set_irq(s->bus);
}
1103
1104 static void ide_flush_cache(IDEState *s)
1105 {
1106 if (s->blk == NULL) {
1107 ide_flush_cb(s, 0);
1108 return;
1109 }
1110
1111 s->status |= BUSY_STAT;
1112 ide_set_retry(s);
1113 block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
1114 s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
1115 }
1116
1117 static void ide_cfata_metadata_inquiry(IDEState *s)
1118 {
1119 uint16_t *p;
1120 uint32_t spd;
1121
1122 p = (uint16_t *) s->io_buffer;
1123 memset(p, 0, 0x200);
1124 spd = ((s->mdata_size - 1) >> 9) + 1;
1125
1126 put_le16(p + 0, 0x0001); /* Data format revision */
1127 put_le16(p + 1, 0x0000); /* Media property: silicon */
1128 put_le16(p + 2, s->media_changed); /* Media status */
1129 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
1130 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
1131 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
1132 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
1133 }
1134
1135 static void ide_cfata_metadata_read(IDEState *s)
1136 {
1137 uint16_t *p;
1138
1139 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1140 s->status = ERR_STAT;
1141 s->error = ABRT_ERR;
1142 return;
1143 }
1144
1145 p = (uint16_t *) s->io_buffer;
1146 memset(p, 0, 0x200);
1147
1148 put_le16(p + 0, s->media_changed); /* Media status */
1149 memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1150 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1151 s->nsector << 9), 0x200 - 2));
1152 }
1153
1154 static void ide_cfata_metadata_write(IDEState *s)
1155 {
1156 if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
1157 s->status = ERR_STAT;
1158 s->error = ABRT_ERR;
1159 return;
1160 }
1161
1162 s->media_changed = 0;
1163
1164 memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
1165 s->io_buffer + 2,
1166 MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
1167 s->nsector << 9), 0x200 - 2));
1168 }
1169
1170 /* called when the inserted state of the media has changed */
1171 static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
1172 {
1173 IDEState *s = opaque;
1174 uint64_t nb_sectors;
1175
1176 s->tray_open = !load;
1177 blk_get_geometry(s->blk, &nb_sectors);
1178 s->nb_sectors = nb_sectors;
1179
1180 /*
1181 * First indicate to the guest that a CD has been removed. That's
1182 * done on the next command the guest sends us.
1183 *
1184 * Then we set UNIT_ATTENTION, by which the guest will
1185 * detect a new CD in the drive. See ide_atapi_cmd() for details.
1186 */
1187 s->cdrom_changed = 1;
1188 s->events.new_media = true;
1189 s->events.eject_request = false;
1190 ide_set_irq(s->bus);
1191 }
1192
1193 static void ide_cd_eject_request_cb(void *opaque, bool force)
1194 {
1195 IDEState *s = opaque;
1196
1197 s->events.eject_request = true;
1198 if (force) {
1199 s->tray_locked = false;
1200 }
1201 ide_set_irq(s->bus);
1202 }
1203
1204 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1205 {
1206 s->lba48 = lba48;
1207
1208 /* handle the 'magic' 0 nsector count conversion here. to avoid
1209 * fiddling with the rest of the read logic, we just store the
1210 * full sector count in ->nsector and ignore ->hob_nsector from now
1211 */
1212 if (!s->lba48) {
1213 if (!s->nsector)
1214 s->nsector = 256;
1215 } else {
1216 if (!s->nsector && !s->hob_nsector)
1217 s->nsector = 65536;
1218 else {
1219 int lo = s->nsector;
1220 int hi = s->hob_nsector;
1221
1222 s->nsector = (hi << 8) | lo;
1223 }
1224 }
1225 }
1226
1227 static void ide_clear_hob(IDEBus *bus)
1228 {
1229 /* any write clears HOB high bit of device control register */
1230 bus->cmd &= ~(IDE_CTRL_HOB);
1231 }
1232
1233 /* IOport [W]rite [R]egisters */
/* IOport [W]rite [R]egisters: command-block register offsets (addr & 7)
 * as seen on guest writes; see ide_ioport_write(). */
enum ATA_IOPORT_WR {
    ATA_IOPORT_WR_DATA = 0,
    ATA_IOPORT_WR_FEATURES = 1,
    ATA_IOPORT_WR_SECTOR_COUNT = 2,
    ATA_IOPORT_WR_SECTOR_NUMBER = 3,
    ATA_IOPORT_WR_CYLINDER_LOW = 4,
    ATA_IOPORT_WR_CYLINDER_HIGH = 5,
    ATA_IOPORT_WR_DEVICE_HEAD = 6,
    ATA_IOPORT_WR_COMMAND = 7,
    ATA_IOPORT_WR_NUM_REGISTERS,
};
1245
/* Human-readable register names, indexed by ATA_IOPORT_WR; used by the
 * ide_ioport_write trace point. */
const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
    [ATA_IOPORT_WR_DATA] = "Data",
    [ATA_IOPORT_WR_FEATURES] = "Features",
    [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_WR_COMMAND] = "Command"
};
1256
1257 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1258 {
1259 IDEBus *bus = opaque;
1260 IDEState *s = idebus_active_if(bus);
1261 int reg_num = addr & 7;
1262
1263 trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1264
1265 /* ignore writes to command block while busy with previous command */
1266 if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1267 return;
1268 }
1269
1270 /* NOTE: Device0 and Device1 both receive incoming register writes.
1271 * (They're on the same bus! They have to!) */
1272
1273 switch (reg_num) {
1274 case 0:
1275 break;
1276 case ATA_IOPORT_WR_FEATURES:
1277 ide_clear_hob(bus);
1278 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1279 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1280 bus->ifs[0].feature = val;
1281 bus->ifs[1].feature = val;
1282 break;
1283 case ATA_IOPORT_WR_SECTOR_COUNT:
1284 ide_clear_hob(bus);
1285 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1286 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1287 bus->ifs[0].nsector = val;
1288 bus->ifs[1].nsector = val;
1289 break;
1290 case ATA_IOPORT_WR_SECTOR_NUMBER:
1291 ide_clear_hob(bus);
1292 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1293 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1294 bus->ifs[0].sector = val;
1295 bus->ifs[1].sector = val;
1296 break;
1297 case ATA_IOPORT_WR_CYLINDER_LOW:
1298 ide_clear_hob(bus);
1299 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1300 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1301 bus->ifs[0].lcyl = val;
1302 bus->ifs[1].lcyl = val;
1303 break;
1304 case ATA_IOPORT_WR_CYLINDER_HIGH:
1305 ide_clear_hob(bus);
1306 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1307 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1308 bus->ifs[0].hcyl = val;
1309 bus->ifs[1].hcyl = val;
1310 break;
1311 case ATA_IOPORT_WR_DEVICE_HEAD:
1312 ide_clear_hob(bus);
1313 bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON);
1314 bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON);
1315 /* select drive */
1316 bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0;
1317 break;
1318 default:
1319 case ATA_IOPORT_WR_COMMAND:
1320 ide_clear_hob(bus);
1321 qemu_irq_lower(bus->irq);
1322 ide_exec_cmd(bus, val);
1323 break;
1324 }
1325 }
1326
1327 static void ide_reset(IDEState *s)
1328 {
1329 trace_ide_reset(s);
1330
1331 if (s->pio_aiocb) {
1332 blk_aio_cancel(s->pio_aiocb);
1333 s->pio_aiocb = NULL;
1334 }
1335
1336 if (s->drive_kind == IDE_CFATA)
1337 s->mult_sectors = 0;
1338 else
1339 s->mult_sectors = MAX_MULT_SECTORS;
1340 /* ide regs */
1341 s->feature = 0;
1342 s->error = 0;
1343 s->nsector = 0;
1344 s->sector = 0;
1345 s->lcyl = 0;
1346 s->hcyl = 0;
1347
1348 /* lba48 */
1349 s->hob_feature = 0;
1350 s->hob_sector = 0;
1351 s->hob_nsector = 0;
1352 s->hob_lcyl = 0;
1353 s->hob_hcyl = 0;
1354
1355 s->select = (ATA_DEV_ALWAYS_ON);
1356 s->status = READY_STAT | SEEK_STAT;
1357
1358 s->lba48 = 0;
1359
1360 /* ATAPI specific */
1361 s->sense_key = 0;
1362 s->asc = 0;
1363 s->cdrom_changed = 0;
1364 s->packet_transfer_size = 0;
1365 s->elementary_transfer_size = 0;
1366 s->io_buffer_index = 0;
1367 s->cd_sector_size = 0;
1368 s->atapi_dma = 0;
1369 s->tray_locked = 0;
1370 s->tray_open = 0;
1371 /* ATA DMA state */
1372 s->io_buffer_size = 0;
1373 s->req_nb_sectors = 0;
1374
1375 ide_set_signature(s);
1376 /* init the transfer handler so that 0xffff is returned on data
1377 accesses */
1378 s->end_transfer_func = ide_dummy_transfer_stop;
1379 ide_dummy_transfer_stop(s);
1380 s->media_changed = 0;
1381 }
1382
/* Command handler that does nothing and reports immediate completion
 * (used for STANDBY/IDLE/SLEEP and similar no-op commands). */
static bool cmd_nop(IDEState *s, uint8_t cmd)
{
    return true;
}
1387
/*
 * DEVICE RESET (ATAPI): halt any in-flight transfer, reset the device
 * state and signature, and report the reset status directly.  The order
 * matters: PIO must be halted before DMA is cancelled.
 */
static bool cmd_device_reset(IDEState *s, uint8_t cmd)
{
    /* Halt PIO (in the DRQ phase), then DMA */
    ide_transfer_halt(s);
    ide_cancel_dma_sync(s);

    /* Reset any PIO commands, reset signature, etc */
    ide_reset(s);

    /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
     * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
    s->status = 0x00;

    /* Do not overwrite status register */
    return false;
}
1404
1405 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1406 {
1407 switch (s->feature) {
1408 case DSM_TRIM:
1409 if (s->blk) {
1410 ide_sector_start_dma(s, IDE_DMA_TRIM);
1411 return false;
1412 }
1413 break;
1414 }
1415
1416 ide_abort_command(s);
1417 return true;
1418 }
1419
1420 static bool cmd_identify(IDEState *s, uint8_t cmd)
1421 {
1422 if (s->blk && s->drive_kind != IDE_CD) {
1423 if (s->drive_kind != IDE_CFATA) {
1424 ide_identify(s);
1425 } else {
1426 ide_cfata_identify(s);
1427 }
1428 s->status = READY_STAT | SEEK_STAT;
1429 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1430 ide_set_irq(s->bus);
1431 return false;
1432 } else {
1433 if (s->drive_kind == IDE_CD) {
1434 ide_set_signature(s);
1435 }
1436 ide_abort_command(s);
1437 }
1438
1439 return true;
1440 }
1441
1442 static bool cmd_verify(IDEState *s, uint8_t cmd)
1443 {
1444 bool lba48 = (cmd == WIN_VERIFY_EXT);
1445
1446 /* do sector number check ? */
1447 ide_cmd_lba48_transform(s, lba48);
1448
1449 return true;
1450 }
1451
1452 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1453 {
1454 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1455 /* Disable Read and Write Multiple */
1456 s->mult_sectors = 0;
1457 } else if ((s->nsector & 0xff) != 0 &&
1458 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1459 (s->nsector & (s->nsector - 1)) != 0)) {
1460 ide_abort_command(s);
1461 } else {
1462 s->mult_sectors = s->nsector & 0xff;
1463 }
1464
1465 return true;
1466 }
1467
1468 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1469 {
1470 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1471
1472 if (!s->blk || !s->mult_sectors) {
1473 ide_abort_command(s);
1474 return true;
1475 }
1476
1477 ide_cmd_lba48_transform(s, lba48);
1478 s->req_nb_sectors = s->mult_sectors;
1479 ide_sector_read(s);
1480 return false;
1481 }
1482
1483 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1484 {
1485 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1486 int n;
1487
1488 if (!s->blk || !s->mult_sectors) {
1489 ide_abort_command(s);
1490 return true;
1491 }
1492
1493 ide_cmd_lba48_transform(s, lba48);
1494
1495 s->req_nb_sectors = s->mult_sectors;
1496 n = MIN(s->nsector, s->req_nb_sectors);
1497
1498 s->status = SEEK_STAT | READY_STAT;
1499 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1500
1501 s->media_changed = 1;
1502
1503 return false;
1504 }
1505
1506 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1507 {
1508 bool lba48 = (cmd == WIN_READ_EXT);
1509
1510 if (s->drive_kind == IDE_CD) {
1511 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1512 ide_abort_command(s);
1513 return true;
1514 }
1515
1516 if (!s->blk) {
1517 ide_abort_command(s);
1518 return true;
1519 }
1520
1521 ide_cmd_lba48_transform(s, lba48);
1522 s->req_nb_sectors = 1;
1523 ide_sector_read(s);
1524
1525 return false;
1526 }
1527
1528 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1529 {
1530 bool lba48 = (cmd == WIN_WRITE_EXT);
1531
1532 if (!s->blk) {
1533 ide_abort_command(s);
1534 return true;
1535 }
1536
1537 ide_cmd_lba48_transform(s, lba48);
1538
1539 s->req_nb_sectors = 1;
1540 s->status = SEEK_STAT | READY_STAT;
1541 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1542
1543 s->media_changed = 1;
1544
1545 return false;
1546 }
1547
1548 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1549 {
1550 bool lba48 = (cmd == WIN_READDMA_EXT);
1551
1552 if (!s->blk) {
1553 ide_abort_command(s);
1554 return true;
1555 }
1556
1557 ide_cmd_lba48_transform(s, lba48);
1558 ide_sector_start_dma(s, IDE_DMA_READ);
1559
1560 return false;
1561 }
1562
1563 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1564 {
1565 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1566
1567 if (!s->blk) {
1568 ide_abort_command(s);
1569 return true;
1570 }
1571
1572 ide_cmd_lba48_transform(s, lba48);
1573 ide_sector_start_dma(s, IDE_DMA_WRITE);
1574
1575 s->media_changed = 1;
1576
1577 return false;
1578 }
1579
1580 static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
1581 {
1582 ide_flush_cache(s);
1583 return false;
1584 }
1585
/* SEEK: a no-op in this emulation; DSC is set via the command table's
 * SET_DSC flag on completion. */
static bool cmd_seek(IDEState *s, uint8_t cmd)
{
    /* XXX: Check that seek is within bounds */
    return true;
}
1591
1592 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1593 {
1594 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1595
1596 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1597 if (s->nb_sectors == 0) {
1598 ide_abort_command(s);
1599 return true;
1600 }
1601
1602 ide_cmd_lba48_transform(s, lba48);
1603 ide_set_sector(s, s->nb_sectors - 1);
1604
1605 return true;
1606 }
1607
1608 static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
1609 {
1610 s->nsector = 0xff; /* device active or idle */
1611 return true;
1612 }
1613
/*
 * SET FEATURES: dispatch on the feature register.  Write-cache
 * enable/disable updates the cached IDENTIFY data (word 85) so the
 * guest sees the new setting; "set transfer mode" rewrites IDENTIFY
 * words 62/63/88 to reflect the selected PIO/DMA/UDMA mode.  Unknown
 * subcommands abort.
 */
static bool cmd_set_features(IDEState *s, uint8_t cmd)
{
    uint16_t *identify_data;

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    /* XXX: valid for CDROM ? */
    switch (s->feature) {
    case 0x02: /* write cache enable */
        blk_set_enable_write_cache(s->blk, true);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
        return true;
    case 0x82: /* write cache disable */
        blk_set_enable_write_cache(s->blk, false);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | 1);
        /* Disabling the cache implies flushing whatever is pending;
         * completion is reported from ide_flush_cb(). */
        ide_flush_cache(s);
        return false;
    case 0xcc: /* reverting to power-on defaults enable */
    case 0x66: /* reverting to power-on defaults disable */
    case 0xaa: /* read look-ahead enable */
    case 0x55: /* read look-ahead disable */
    case 0x05: /* set advanced power management mode */
    case 0x85: /* disable advanced power management mode */
    case 0x69: /* NOP */
    case 0x67: /* NOP */
    case 0x96: /* NOP */
    case 0x9a: /* NOP */
    case 0x42: /* enable Automatic Acoustic Mode */
    case 0xc2: /* disable Automatic Acoustic Mode */
        /* accepted but have no effect in this emulation */
        return true;
    case 0x03: /* set transfer mode */
        {
            uint8_t val = s->nsector & 0x07; /* mode number within the class */
            identify_data = (uint16_t *)s->identify_data;

            switch (s->nsector >> 3) { /* transfer mode class */
            case 0x00: /* pio default */
            case 0x01: /* pio mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x02: /* single word dma mode */
                put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x04: /* mdma mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x08: /* udma mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
                break;
            default:
                goto abort_cmd;
            }
            return true;
        }
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}
1687
1688
1689 /*** ATAPI commands ***/
1690
1691 static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
1692 {
1693 ide_atapi_identify(s);
1694 s->status = READY_STAT | SEEK_STAT;
1695 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1696 ide_set_irq(s->bus);
1697 return false;
1698 }
1699
/*
 * EXECUTE DEVICE DIAGNOSTIC.  Note the asymmetry: only the non-ATAPI
 * branch raises an IRQ here; packet devices report via the cleared
 * status register alone.
 */
static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
{
    ide_set_signature(s);

    if (s->drive_kind == IDE_CD) {
        s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
                        * devices to return a clear status register
                        * with READY_STAT *not* set. */
        s->error = 0x01;
    } else {
        s->status = READY_STAT | SEEK_STAT;
        /* The bits of the error register are not as usual for this command!
         * They are part of the regular output (this is why ERR_STAT isn't set)
         * Device 0 passed, Device 1 passed or not present. */
        s->error = 0x01;
        ide_set_irq(s->bus);
    }

    return false;
}
1720
1721 static bool cmd_packet(IDEState *s, uint8_t cmd)
1722 {
1723 /* overlapping commands not supported */
1724 if (s->feature & 0x02) {
1725 ide_abort_command(s);
1726 return true;
1727 }
1728
1729 s->status = READY_STAT | SEEK_STAT;
1730 s->atapi_dma = s->feature & 1;
1731 if (s->atapi_dma) {
1732 s->dma_cmd = IDE_DMA_ATAPI;
1733 }
1734 s->nsector = 1;
1735 ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
1736 ide_atapi_cmd);
1737 return false;
1738 }
1739
1740
1741 /*** CF-ATA commands ***/
1742
1743 static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
1744 {
1745 s->error = 0x09; /* miscellaneous error */
1746 s->status = READY_STAT | SEEK_STAT;
1747 ide_set_irq(s->bus);
1748
1749 return false;
1750 }
1751
1752 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1753 {
1754 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1755 * required for Windows 8 to work with AHCI */
1756
1757 if (cmd == CFA_WEAR_LEVEL) {
1758 s->nsector = 0;
1759 }
1760
1761 if (cmd == CFA_ERASE_SECTORS) {
1762 s->media_changed = 1;
1763 }
1764
1765 return true;
1766 }
1767
/*
 * CFA TRANSLATE SECTOR: return a 512-byte block describing the
 * addressed sector (CHS registers, 24-bit LBA, erase flag and hot
 * count) via a PIO-in transfer.
 */
static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
{
    s->status = READY_STAT | SEEK_STAT;

    memset(s->io_buffer, 0, 0x200);
    s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
    s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
    s->io_buffer[0x02] = s->select;                 /* Head */
    s->io_buffer[0x03] = s->sector;                 /* Sector */
    s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
    s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
    s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
    s->io_buffer[0x13] = 0x00;                      /* Erase flag */
    s->io_buffer[0x18] = 0x00;                      /* Hot count */
    s->io_buffer[0x19] = 0x00;                      /* Hot count */
    s->io_buffer[0x1a] = 0x01;                      /* Hot count */

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    ide_set_irq(s->bus);

    return false;
}
1790
/*
 * CFA ACCESS METADATA STORAGE: dispatch on the feature register to the
 * inquiry/read/write helpers, then transfer the resulting 512-byte
 * block to the guest.  Unknown subcommands abort.
 */
static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
{
    switch (s->feature) {
    case 0x02:  /* Inquiry Metadata Storage */
        ide_cfata_metadata_inquiry(s);
        break;
    case 0x03:  /* Read Metadata Storage */
        ide_cfata_metadata_read(s);
        break;
    case 0x04:  /* Write Metadata Storage */
        ide_cfata_metadata_write(s);
        break;
    default:
        ide_abort_command(s);
        return true;
    }

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    s->status = 0x00; /* NOTE: READY is _not_ set */
    ide_set_irq(s->bus);

    return false;
}
1814
1815 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1816 {
1817 switch (s->feature) {
1818 case 0x01: /* sense temperature in device */
1819 s->nsector = 0x50; /* +20 C */
1820 break;
1821 default:
1822 ide_abort_command(s);
1823 return true;
1824 }
1825
1826 return true;
1827 }
1828
1829
1830 /*** SMART commands ***/
1831
/*
 * SMART: dispatch on the feature register.  The command is only valid
 * with the magic cylinder key (hcyl=0xc2, lcyl=0x4f) and, except for
 * SMART ENABLE itself, only while SMART is enabled.  The 512-byte data
 * pages all carry a two's-complement checksum in their last byte.
 */
static bool cmd_smart(IDEState *s, uint8_t cmd)
{
    int n;

    /* SMART commands require the magic key in the cylinder registers. */
    if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
        goto abort_cmd;
    }

    if (!s->smart_enabled && s->feature != SMART_ENABLE) {
        goto abort_cmd;
    }

    switch (s->feature) {
    case SMART_DISABLE:
        s->smart_enabled = 0;
        return true;

    case SMART_ENABLE:
        s->smart_enabled = 1;
        return true;

    case SMART_ATTR_AUTOSAVE:
        /* Sector register selects autosave off (0x00) or on (0xf1). */
        switch (s->sector) {
        case 0x00:
            s->smart_autosave = 0;
            break;
        case 0xf1:
            s->smart_autosave = 1;
            break;
        default:
            goto abort_cmd;
        }
        return true;

    case SMART_STATUS:
        /* Healthy drives echo the key back; failing ones invert it. */
        if (!s->smart_errors) {
            s->hcyl = 0xc2;
            s->lcyl = 0x4f;
        } else {
            s->hcyl = 0x2c;
            s->lcyl = 0xf4;
        }
        return true;

    case SMART_READ_THRESH:
        /* Threshold page: attribute id + threshold per 12-byte slot. */
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
            s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
        }

        /* checksum */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_DATA:
        /* Attribute data page: the first 11 bytes of each attribute. */
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            int i;
            for (i = 0; i < 11; i++) {
                s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
            }
        }

        /* Offline data collection status / latest self-test result. */
        s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
        if (s->smart_selftest_count == 0) {
            s->io_buffer[363] = 0;
        } else {
            s->io_buffer[363] =
                s->smart_selftest_data[3 +
                           (s->smart_selftest_count - 1) *
                           24];
        }
        s->io_buffer[364] = 0x20;
        s->io_buffer[365] = 0x01;
        /* offline data collection capacity: execute + self-test*/
        s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
        s->io_buffer[368] = 0x03; /* smart capability (1) */
        s->io_buffer[369] = 0x00; /* smart capability (2) */
        s->io_buffer[370] = 0x01; /* error logging supported */
        s->io_buffer[372] = 0x02; /* minutes for poll short test */
        s->io_buffer[373] = 0x36; /* minutes for poll ext test */
        s->io_buffer[374] = 0x01; /* minutes for poll conveyance */

        /* checksum */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_LOG:
        /* Sector register selects which log page to read. */
        switch (s->sector) {
        case 0x01: /* summary smart error log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            s->io_buffer[1] = 0x00; /* no error entries */
            s->io_buffer[452] = s->smart_errors & 0xff;
            s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;

            /* checksum */
            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        case 0x06: /* smart self test log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            if (s->smart_selftest_count == 0) {
                s->io_buffer[508] = 0;
            } else {
                s->io_buffer[508] = s->smart_selftest_count;
                for (n = 2; n < 506; n++) {
                    s->io_buffer[n] = s->smart_selftest_data[n];
                }
            }

            /* checksum */
            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        default:
            goto abort_cmd;
        }
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_EXECUTE_OFFLINE:
        /* Record a new (always-passing) self-test log entry; the log
         * wraps after 21 entries of 24 bytes each. */
        switch (s->sector) {
        case 0: /* off-line routine */
        case 1: /* short self test */
        case 2: /* extended self test */
            s->smart_selftest_count++;
            if (s->smart_selftest_count > 21) {
                s->smart_selftest_count = 1;
            }
            n = 2 + (s->smart_selftest_count - 1) * 24;
            s->smart_selftest_data[n] = s->sector;
            s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
            s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
            s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
            break;
        default:
            goto abort_cmd;
        }
        return true;
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}
2001
/* Drive-kind admission masks for ide_cmd_table entries: bit (1 << drive_kind)
 * must be set for the command to be accepted by that drive type. */
#define HD_OK (1u << IDE_HD)
#define CD_OK (1u << IDE_CD)
#define CFA_OK (1u << IDE_CFATA)
#define HD_CFA_OK (HD_OK | CFA_OK)
#define ALL_OK (HD_OK | CD_OK | CFA_OK)

/* Set the Disk Seek Completed status bit during completion */
#define SET_DSC (1u << 8)

/* See ACS-2 T13/2015-D Table B.2 Command codes */
static const struct {
    /* Returns true if the completion code should be run */
    bool (*handler)(IDEState *s, uint8_t cmd);
    int flags;
} ide_cmd_table[0x100] = {
    /* NOP not implemented, mandatory for CD */
    [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
    [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
    [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
    [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
    [WIN_READ]                    = { cmd_read_pio, ALL_OK },
    [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
    [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
    [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
    [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
    [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
    [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
    [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
    [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
    [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
    [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
    [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
    [WIN_SPECIFY]                 = { cmd_nop, HD_CFA_OK | SET_DSC },
    [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
    [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
    [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
    [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
    [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
    [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
    [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
    [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
    [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
    [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
    [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
    [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
    [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
    [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
    [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
    [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
    [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
    [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
    [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
    [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
    [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
};
2075
2076 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2077 {
2078 return cmd < ARRAY_SIZE(ide_cmd_table)
2079 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2080 }
2081
/*
 * Execute an ATA command byte written to the command register.
 * Filters out commands to absent slaves, enforces the BSY/DRQ rule
 * (only DEVICE RESET to an ATAPI device may interrupt), checks the
 * command table, then runs the handler.  Handlers returning true have
 * completed synchronously; their status/DSC/IRQ are finalized here.
 */
void ide_exec_cmd(IDEBus *bus, uint32_t val)
{
    IDEState *s;
    bool complete;

    s = idebus_active_if(bus);
    trace_ide_exec_cmd(bus, s, val);

    /* ignore commands to non existent slave */
    if (s != bus->ifs && !s->blk) {
        return;
    }

    /* Only RESET is allowed while BSY and/or DRQ are set,
     * and only to ATAPI devices. */
    if (s->status & (BUSY_STAT|DRQ_STAT)) {
        if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
            return;
        }
    }

    if (!ide_cmd_permitted(s, val)) {
        ide_abort_command(s);
        ide_set_irq(s->bus);
        return;
    }

    s->status = READY_STAT | BUSY_STAT;
    s->error = 0;
    s->io_buffer_offset = 0;

    complete = ide_cmd_table[val].handler(s, val);
    if (complete) {
        s->status &= ~BUSY_STAT;
        /* ERR_STAT and the error register must agree on completion. */
        assert(!!s->error == !!(s->status & ERR_STAT));

        if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
            s->status |= SEEK_STAT;
        }

        ide_cmd_done(s);
        ide_set_irq(s->bus);
    }
}
2126
2127 /* IOport [R]ead [R]egisters */
/* IOport [R]ead [R]egisters: command-block register offsets (addr & 7)
 * as seen on guest reads; see ide_ioport_read(). */
enum ATA_IOPORT_RR {
    ATA_IOPORT_RR_DATA = 0,
    ATA_IOPORT_RR_ERROR = 1,
    ATA_IOPORT_RR_SECTOR_COUNT = 2,
    ATA_IOPORT_RR_SECTOR_NUMBER = 3,
    ATA_IOPORT_RR_CYLINDER_LOW = 4,
    ATA_IOPORT_RR_CYLINDER_HIGH = 5,
    ATA_IOPORT_RR_DEVICE_HEAD = 6,
    ATA_IOPORT_RR_STATUS = 7,
    ATA_IOPORT_RR_NUM_REGISTERS,
};
2139
/* Human-readable register names, indexed by ATA_IOPORT_RR; used by the
 * ide_ioport_read trace point. */
const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
    [ATA_IOPORT_RR_DATA] = "Data",
    [ATA_IOPORT_RR_ERROR] = "Error",
    [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_RR_STATUS] = "Status"
};
2150
/*
 * Handle a guest read from one of the eight command-block registers.
 * When the HOB bit of the device control register is set, the "previous
 * content" (hob_*) of each task-file register is returned instead of
 * the current one.  Empty buses read as 0; reading the status register
 * additionally clears the pending IRQ.
 */
uint32_t ide_ioport_read(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint32_t reg_num;
    int ret, hob;

    reg_num = addr & 7;
    hob = bus->cmd & (IDE_CTRL_HOB);
    switch (reg_num) {
    case ATA_IOPORT_RR_DATA:
        /* Data port reads go through the data_read{w,l} paths, not here. */
        ret = 0xff;
        break;
    case ATA_IOPORT_RR_ERROR:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else if (!hob) {
            ret = s->error;
        } else {
            ret = s->hob_feature;
        }
        break;
    case ATA_IOPORT_RR_SECTOR_COUNT:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->nsector & 0xff;
        } else {
            ret = s->hob_nsector;
        }
        break;
    case ATA_IOPORT_RR_SECTOR_NUMBER:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->sector;
        } else {
            ret = s->hob_sector;
        }
        break;
    case ATA_IOPORT_RR_CYLINDER_LOW:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->lcyl;
        } else {
            ret = s->hob_lcyl;
        }
        break;
    case ATA_IOPORT_RR_CYLINDER_HIGH:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->hcyl;
        } else {
            ret = s->hob_hcyl;
        }
        break;
    case ATA_IOPORT_RR_DEVICE_HEAD:
        /* The device/head register has no HOB counterpart. */
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else {
            ret = s->select;
        }
        break;
    default:
    case ATA_IOPORT_RR_STATUS:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else {
            ret = s->status;
        }
        /* Reading the status register acknowledges the interrupt. */
        qemu_irq_lower(bus->irq);
        break;
    }

    trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
    return ret;
}
2232
2233 uint32_t ide_status_read(void *opaque, uint32_t addr)
2234 {
2235 IDEBus *bus = opaque;
2236 IDEState *s = idebus_active_if(bus);
2237 int ret;
2238
2239 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2240 (s != bus->ifs && !s->blk)) {
2241 ret = 0;
2242 } else {
2243 ret = s->status;
2244 }
2245
2246 trace_ide_status_read(addr, ret, bus, s);
2247 return ret;
2248 }
2249
/*
 * Apply a software reset (SRST) to a single device: tear down any PIO or
 * DMA transfer in flight, restore power-on register state, and leave the
 * diagnostic signature behind. The call order below is significant.
 */
static void ide_perform_srst(IDEState *s)
{
    s->status |= BUSY_STAT;

    /* Halt PIO (Via register state); PIO BH remains scheduled. */
    ide_transfer_halt(s);

    /* Cancel DMA -- may drain block device and invoke callbacks */
    ide_cancel_dma_sync(s);

    /* Cancel PIO callback, reset registers/signature, etc */
    ide_reset(s);

    /* perform diagnostic */
    cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE);
}
2266
2267 static void ide_bus_perform_srst(void *opaque)
2268 {
2269 IDEBus *bus = opaque;
2270 IDEState *s;
2271 int i;
2272
2273 for (i = 0; i < 2; i++) {
2274 s = &bus->ifs[i];
2275 ide_perform_srst(s);
2276 }
2277
2278 bus->cmd &= ~IDE_CTRL_RESET;
2279 }
2280
2281 void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)
2282 {
2283 IDEBus *bus = opaque;
2284 IDEState *s;
2285 int i;
2286
2287 trace_ide_ctrl_write(addr, val, bus);
2288
2289 /* Device0 and Device1 each have their own control register,
2290 * but QEMU models it as just one register in the controller. */
2291 if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) {
2292 for (i = 0; i < 2; i++) {
2293 s = &bus->ifs[i];
2294 s->status |= BUSY_STAT;
2295 }
2296 replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2297 ide_bus_perform_srst, bus);
2298 }
2299
2300 bus->cmd = val;
2301 }
2302
2303 /*
2304 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2305 * transferred from the device to the guest), false if it's a PIO in
2306 */
2307 static bool ide_is_pio_out(IDEState *s)
2308 {
2309 if (s->end_transfer_func == ide_sector_write ||
2310 s->end_transfer_func == ide_atapi_cmd) {
2311 return false;
2312 } else if (s->end_transfer_func == ide_sector_read ||
2313 s->end_transfer_func == ide_transfer_stop ||
2314 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2315 s->end_transfer_func == ide_dummy_transfer_stop) {
2316 return true;
2317 }
2318
2319 abort();
2320 }
2321
2322 void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
2323 {
2324 IDEBus *bus = opaque;
2325 IDEState *s = idebus_active_if(bus);
2326 uint8_t *p;
2327
2328 trace_ide_data_writew(addr, val, bus, s);
2329
2330 /* PIO data access allowed only when DRQ bit is set. The result of a write
2331 * during PIO out is indeterminate, just ignore it. */
2332 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2333 return;
2334 }
2335
2336 p = s->data_ptr;
2337 if (p + 2 > s->data_end) {
2338 return;
2339 }
2340
2341 *(uint16_t *)p = le16_to_cpu(val);
2342 p += 2;
2343 s->data_ptr = p;
2344 if (p >= s->data_end) {
2345 s->status &= ~DRQ_STAT;
2346 s->end_transfer_func(s);
2347 }
2348 }
2349
2350 uint32_t ide_data_readw(void *opaque, uint32_t addr)
2351 {
2352 IDEBus *bus = opaque;
2353 IDEState *s = idebus_active_if(bus);
2354 uint8_t *p;
2355 int ret;
2356
2357 /* PIO data access allowed only when DRQ bit is set. The result of a read
2358 * during PIO in is indeterminate, return 0 and don't move forward. */
2359 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2360 return 0;
2361 }
2362
2363 p = s->data_ptr;
2364 if (p + 2 > s->data_end) {
2365 return 0;
2366 }
2367
2368 ret = cpu_to_le16(*(uint16_t *)p);
2369 p += 2;
2370 s->data_ptr = p;
2371 if (p >= s->data_end) {
2372 s->status &= ~DRQ_STAT;
2373 s->end_transfer_func(s);
2374 }
2375
2376 trace_ide_data_readw(addr, ret, bus, s);
2377 return ret;
2378 }
2379
2380 void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
2381 {
2382 IDEBus *bus = opaque;
2383 IDEState *s = idebus_active_if(bus);
2384 uint8_t *p;
2385
2386 trace_ide_data_writel(addr, val, bus, s);
2387
2388 /* PIO data access allowed only when DRQ bit is set. The result of a write
2389 * during PIO out is indeterminate, just ignore it. */
2390 if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
2391 return;
2392 }
2393
2394 p = s->data_ptr;
2395 if (p + 4 > s->data_end) {
2396 return;
2397 }
2398
2399 *(uint32_t *)p = le32_to_cpu(val);
2400 p += 4;
2401 s->data_ptr = p;
2402 if (p >= s->data_end) {
2403 s->status &= ~DRQ_STAT;
2404 s->end_transfer_func(s);
2405 }
2406 }
2407
2408 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2409 {
2410 IDEBus *bus = opaque;
2411 IDEState *s = idebus_active_if(bus);
2412 uint8_t *p;
2413 int ret;
2414
2415 /* PIO data access allowed only when DRQ bit is set. The result of a read
2416 * during PIO in is indeterminate, return 0 and don't move forward. */
2417 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2418 ret = 0;
2419 goto out;
2420 }
2421
2422 p = s->data_ptr;
2423 if (p + 4 > s->data_end) {
2424 return 0;
2425 }
2426
2427 ret = cpu_to_le32(*(uint32_t *)p);
2428 p += 4;
2429 s->data_ptr = p;
2430 if (p >= s->data_end) {
2431 s->status &= ~DRQ_STAT;
2432 s->end_transfer_func(s);
2433 }
2434
2435 out:
2436 trace_ide_data_readl(addr, ret, bus, s);
2437 return ret;
2438 }
2439
2440 static void ide_dummy_transfer_stop(IDEState *s)
2441 {
2442 s->data_ptr = s->io_buffer;
2443 s->data_end = s->io_buffer;
2444 s->io_buffer[0] = 0xff;
2445 s->io_buffer[1] = 0xff;
2446 s->io_buffer[2] = 0xff;
2447 s->io_buffer[3] = 0xff;
2448 }
2449
/*
 * Hard reset of a whole IDE bus: restore the shared bus registers and
 * both device slots to power-on defaults, then cancel any DMA request
 * still pending and reset the DMA provider (HBA) itself.
 */
void ide_bus_reset(IDEBus *bus)
{
    bus->unit = 0;
    bus->cmd = 0;
    ide_reset(&bus->ifs[0]);
    ide_reset(&bus->ifs[1]);
    ide_clear_hob(bus);

    /* pending async DMA */
    if (bus->dma->aiocb) {
        trace_ide_bus_reset_aio();
        blk_aio_cancel(bus->dma->aiocb);
        bus->dma->aiocb = NULL;
    }

    /* reset dma provider too */
    if (bus->dma->ops->reset) {
        bus->dma->ops->reset(bus->dma);
    }
}
2470
2471 static bool ide_cd_is_tray_open(void *opaque)
2472 {
2473 return ((IDEState *)opaque)->tray_open;
2474 }
2475
2476 static bool ide_cd_is_medium_locked(void *opaque)
2477 {
2478 return ((IDEState *)opaque)->tray_locked;
2479 }
2480
2481 static void ide_resize_cb(void *opaque)
2482 {
2483 IDEState *s = opaque;
2484 uint64_t nb_sectors;
2485
2486 if (!s->identify_set) {
2487 return;
2488 }
2489
2490 blk_get_geometry(s->blk, &nb_sectors);
2491 s->nb_sectors = nb_sectors;
2492
2493 /* Update the identify data buffer. */
2494 if (s->drive_kind == IDE_CFATA) {
2495 ide_cfata_identify_size(s);
2496 } else {
2497 /* IDE_CD uses a different set of callbacks entirely. */
2498 assert(s->drive_kind != IDE_CD);
2499 ide_identify_size(s);
2500 }
2501 }
2502
/* Block-layer callbacks for CD-ROM drives: media change/eject and tray
 * state queries. */
static const BlockDevOps ide_cd_block_ops = {
    .change_media_cb = ide_cd_change_cb,
    .eject_request_cb = ide_cd_eject_request_cb,
    .is_tray_open = ide_cd_is_tray_open,
    .is_medium_locked = ide_cd_is_medium_locked,
};
2509
/* Block-layer callbacks for hard disks: only image resize matters. */
static const BlockDevOps ide_hd_block_ops = {
    .resize_cb = ide_resize_cb,
};
2513
2514 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2515 const char *version, const char *serial, const char *model,
2516 uint64_t wwn,
2517 uint32_t cylinders, uint32_t heads, uint32_t secs,
2518 int chs_trans, Error **errp)
2519 {
2520 uint64_t nb_sectors;
2521
2522 s->blk = blk;
2523 s->drive_kind = kind;
2524
2525 blk_get_geometry(blk, &nb_sectors);
2526 s->cylinders = cylinders;
2527 s->heads = heads;
2528 s->sectors = secs;
2529 s->chs_trans = chs_trans;
2530 s->nb_sectors = nb_sectors;
2531 s->wwn = wwn;
2532 /* The SMART values should be preserved across power cycles
2533 but they aren't. */
2534 s->smart_enabled = 1;
2535 s->smart_autosave = 1;
2536 s->smart_errors = 0;
2537 s->smart_selftest_count = 0;
2538 if (kind == IDE_CD) {
2539 blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2540 blk_set_guest_block_size(blk, 2048);
2541 } else {
2542 if (!blk_is_inserted(s->blk)) {
2543 error_setg(errp, "Device needs media, but drive is empty");
2544 return -1;
2545 }
2546 if (!blk_is_writable(blk)) {
2547 error_setg(errp, "Can't use a read-only drive");
2548 return -1;
2549 }
2550 blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2551 }
2552 if (serial) {
2553 pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2554 } else {
2555 snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2556 "QM%05d", s->drive_serial);
2557 }
2558 if (model) {
2559 pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2560 } else {
2561 switch (kind) {
2562 case IDE_CD:
2563 strcpy(s->drive_model_str, "QEMU DVD-ROM");
2564 break;
2565 case IDE_CFATA:
2566 strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2567 break;
2568 default:
2569 strcpy(s->drive_model_str, "QEMU HARDDISK");
2570 break;
2571 }
2572 }
2573
2574 if (version) {
2575 pstrcpy(s->version, sizeof(s->version), version);
2576 } else {
2577 pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2578 }
2579
2580 ide_reset(s);
2581 blk_iostatus_enable(blk);
2582 return 0;
2583 }
2584
/*
 * One-time per-device initialisation: wire the device to its bus slot,
 * assign a unique serial number, allocate the PIO/DMA bounce buffer and
 * the SMART self-test scratch page, and create the sector-write timer
 * (fires ide_sector_write_timer_cb).
 */
static void ide_init1(IDEBus *bus, int unit)
{
    /* Monotonically increasing across every drive in the VM */
    static int drive_serial = 1;
    IDEState *s = &bus->ifs[unit];

    s->bus = bus;
    s->unit = unit;
    s->drive_serial = drive_serial++;
    /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
    s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
    s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
    memset(s->io_buffer, 0, s->io_buffer_total_len);

    s->smart_selftest_data = blk_blockalign(s->blk, 512);
    memset(s->smart_selftest_data, 0, 512);

    s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                         ide_sector_write_timer_cb, s);
}
2604
/* No-op rw_buf hook for the stub DMA provider: reports success (0). */
static int ide_nop_int(const IDEDMA *dma, bool is_write)
{
    return 0;
}
2609
/* No-op restart_dma hook for the stub DMA provider. */
static void ide_nop(const IDEDMA *dma)
{
}
2613
/* No-op prepare_buf hook for the stub DMA provider: 0 bytes prepared. */
static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
{
    return 0;
}
2618
/* Inert DMA callbacks for buses without a real DMA controller. */
static const IDEDMAOps ide_dma_nop_ops = {
    .prepare_buf = ide_nop_int32,
    .restart_dma = ide_nop,
    .rw_buf = ide_nop_int,
};
2624
/*
 * Re-issue a previously failed DMA transfer: restore the task-file state
 * saved at submission time (bus->retry_*), let the HBA restore its own
 * DMA state, and start the transfer over from the beginning.
 */
static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
    s->unit = s->bus->retry_unit;
    ide_set_sector(s, s->bus->retry_sector_num);
    s->nsector = s->bus->retry_nsector;
    s->bus->dma->ops->restart_dma(s->bus->dma);
    /* restart from scratch: no bytes transferred yet */
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;
    ide_start_dma(s, ide_dma_cb);
}
2635
/*
 * Bottom half run after the VM resumes: re-submit the request recorded
 * in bus->error_status, dispatching to the DMA, PIO, flush or ATAPI
 * restart path that originally failed.
 */
static void ide_restart_bh(void *opaque)
{
    IDEBus *bus = opaque;
    IDEState *s;
    bool is_read;
    int error_status;

    qemu_bh_delete(bus->bh);
    bus->bh = NULL;

    error_status = bus->error_status;
    if (bus->error_status == 0) {
        /* nothing pending to retry */
        return;
    }

    s = idebus_active_if(bus);
    is_read = (bus->error_status & IDE_RETRY_READ) != 0;

    /* The error status must be cleared before resubmitting the request: The
     * request may fail again, and this case can only be distinguished if the
     * called function can set a new error status. */
    bus->error_status = 0;

    /* The HBA has generically asked to be kicked on retry */
    if (error_status & IDE_RETRY_HBA) {
        if (s->bus->dma->ops->restart) {
            s->bus->dma->ops->restart(s->bus->dma);
        }
    } else if (IS_IDE_RETRY_DMA(error_status)) {
        if (error_status & IDE_RETRY_TRIM) {
            ide_restart_dma(s, IDE_DMA_TRIM);
        } else {
            ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
        }
    } else if (IS_IDE_RETRY_PIO(error_status)) {
        if (is_read) {
            ide_sector_read(s);
        } else {
            ide_sector_write(s);
        }
    } else if (error_status & IDE_RETRY_FLUSH) {
        ide_flush_cache(s);
    } else if (IS_IDE_RETRY_ATAPI(error_status)) {
        assert(s->end_transfer_func == ide_atapi_cmd);
        ide_atapi_dma_restart(s);
    } else {
        /* error_status had a retry flag none of the branches recognise */
        abort();
    }
}
2685
2686 static void ide_restart_cb(void *opaque, bool running, RunState state)
2687 {
2688 IDEBus *bus = opaque;
2689
2690 if (!running)
2691 return;
2692
2693 if (!bus->bh) {
2694 bus->bh = qemu_bh_new(ide_restart_bh, bus);
2695 qemu_bh_schedule(bus->bh);
2696 }
2697 }
2698
/*
 * Subscribe the bus to VM run-state changes so interrupted requests can
 * be retried on resume. Registered only for HBAs that support DMA
 * restart.
 */
void ide_register_restart_cb(IDEBus *bus)
{
    if (bus->dma->ops->restart_dma) {
        bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
    }
}
2705
/* Shared stub DMA instance, installed on every bus by ide_init2(). */
static IDEDMA ide_dma_nop = {
    .ops = &ide_dma_nop_ops,
    .aiocb = NULL,
};
2710
2711 void ide_init2(IDEBus *bus, qemu_irq irq)
2712 {
2713 int i;
2714
2715 for(i = 0; i < 2; i++) {
2716 ide_init1(bus, i);
2717 ide_reset(&bus->ifs[i]);
2718 }
2719 bus->irq = irq;
2720 bus->dma = &ide_dma_nop;
2721 }
2722
/*
 * Release the per-device resources allocated by ide_init1(): the
 * sector-write timer, the SMART self-test page and the I/O buffer.
 */
void ide_exit(IDEState *s)
{
    timer_free(s->sector_write_timer);
    qemu_vfree(s->smart_selftest_data);
    qemu_vfree(s->io_buffer);
}
2729
2730 static bool is_identify_set(void *opaque, int version_id)
2731 {
2732 IDEState *s = opaque;
2733
2734 return s->identify_set != 0;
2735 }
2736
/* All possible end_transfer_func values, indexed for migration.
 * WARNING: the order defines the migrated end_transfer_fn_idx values
 * (see ide_drive_pio_pre_save/post_load) -- append only, never reorder. */
static EndTransferFunc* transfer_end_table[] = {
    ide_sector_read,
    ide_sector_write,
    ide_transfer_stop,
    ide_atapi_cmd_reply_end,
    ide_atapi_cmd,
    ide_dummy_transfer_stop,
};
2745
2746 static int transfer_end_table_idx(EndTransferFunc *fn)
2747 {
2748 int i;
2749
2750 for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2751 if (transfer_end_table[i] == fn)
2752 return i;
2753
2754 return -1;
2755 }
2756
2757 static int ide_drive_post_load(void *opaque, int version_id)
2758 {
2759 IDEState *s = opaque;
2760
2761 if (s->blk && s->identify_set) {
2762 blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
2763 }
2764 return 0;
2765 }
2766
/*
 * Post-load hook for the PIO subsection: rebuild the derived PIO state
 * (callback pointer and data window) from the migrated index/offsets.
 */
static int ide_drive_pio_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    /* The index comes from the migration stream and is untrusted */
    if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
        return -EINVAL;
    }
    s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
    /* re-derive the PIO window pointers from the migrated offsets */
    s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
    s->data_end = s->data_ptr + s->cur_io_buffer_len;
    s->atapi_dma = s->feature & 1; /* as per cmd_packet */

    return 0;
}
2781
/*
 * Pre-save hook for the PIO subsection: encode the live PIO state
 * (pointers and callback) into migratable offsets and a table index.
 */
static int ide_drive_pio_pre_save(void *opaque)
{
    IDEState *s = opaque;
    int idx;

    /* serialize the PIO window as offsets into io_buffer */
    s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
    s->cur_io_buffer_len = s->data_end - s->data_ptr;

    idx = transfer_end_table_idx(s->end_transfer_func);
    if (idx == -1) {
        fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
                __func__);
        /* fall back to index 2, i.e. ide_transfer_stop */
        s->end_transfer_fn_idx = 2;
    } else {
        s->end_transfer_fn_idx = idx;
    }

    return 0;
}
2801
2802 static bool ide_drive_pio_state_needed(void *opaque)
2803 {
2804 IDEState *s = opaque;
2805
2806 return ((s->status & DRQ_STAT) != 0)
2807 || (s->bus->error_status & IDE_RETRY_PIO);
2808 }
2809
2810 static bool ide_tray_state_needed(void *opaque)
2811 {
2812 IDEState *s = opaque;
2813
2814 return s->tray_open || s->tray_locked;
2815 }
2816
2817 static bool ide_atapi_gesn_needed(void *opaque)
2818 {
2819 IDEState *s = opaque;
2820
2821 return s->events.new_media || s->events.eject_request;
2822 }
2823
2824 static bool ide_error_needed(void *opaque)
2825 {
2826 IDEBus *bus = opaque;
2827
2828 return (bus->error_status != 0);
2829 }
2830
2831 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2832 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2833 .name ="ide_drive/atapi/gesn_state",
2834 .version_id = 1,
2835 .minimum_version_id = 1,
2836 .needed = ide_atapi_gesn_needed,
2837 .fields = (VMStateField[]) {
2838 VMSTATE_BOOL(events.new_media, IDEState),
2839 VMSTATE_BOOL(events.eject_request, IDEState),
2840 VMSTATE_END_OF_LIST()
2841 }
2842 };
2843
/* CD-ROM tray open/locked state (see ide_tray_state_needed). */
static const VMStateDescription vmstate_ide_tray_state = {
    .name = "ide_drive/tray_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_tray_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(tray_open, IDEState),
        VMSTATE_BOOL(tray_locked, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
2855
/* In-flight PIO transfer state; the pre_save/post_load hooks translate
 * the live pointers and callback into migratable offsets and an index
 * into transfer_end_table[]. */
static const VMStateDescription vmstate_ide_drive_pio_state = {
    .name = "ide_drive/pio_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ide_drive_pio_pre_save,
    .post_load = ide_drive_pio_post_load,
    .needed = ide_drive_pio_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(req_nb_sectors, IDEState),
        VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
                             vmstate_info_uint8, uint8_t),
        VMSTATE_INT32(cur_io_buffer_offset, IDEState),
        VMSTATE_INT32(cur_io_buffer_len, IDEState),
        VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
        VMSTATE_INT32(elementary_transfer_size, IDEState),
        VMSTATE_INT32(packet_transfer_size, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
2875
/* Main per-drive migration state: task-file registers, identify data
 * and ATAPI sense. PIO, tray and GESN state travel as subsections only
 * when needed. */
const VMStateDescription vmstate_ide_drive = {
    .name = "ide_drive",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_drive_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(mult_sectors, IDEState),
        VMSTATE_INT32(identify_set, IDEState),
        /* only sent once IDENTIFY data has been generated */
        VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
        VMSTATE_UINT8(feature, IDEState),
        VMSTATE_UINT8(error, IDEState),
        VMSTATE_UINT32(nsector, IDEState),
        VMSTATE_UINT8(sector, IDEState),
        VMSTATE_UINT8(lcyl, IDEState),
        VMSTATE_UINT8(hcyl, IDEState),
        VMSTATE_UINT8(hob_feature, IDEState),
        VMSTATE_UINT8(hob_sector, IDEState),
        VMSTATE_UINT8(hob_nsector, IDEState),
        VMSTATE_UINT8(hob_lcyl, IDEState),
        VMSTATE_UINT8(hob_hcyl, IDEState),
        VMSTATE_UINT8(select, IDEState),
        VMSTATE_UINT8(status, IDEState),
        VMSTATE_UINT8(lba48, IDEState),
        VMSTATE_UINT8(sense_key, IDEState),
        VMSTATE_UINT8(asc, IDEState),
        /* only present in stream version 3 and newer */
        VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_drive_pio_state,
        &vmstate_ide_tray_state,
        &vmstate_ide_atapi_gesn_state,
        NULL
    }
};
2911
2912 static const VMStateDescription vmstate_ide_error_status = {
2913 .name ="ide_bus/error",
2914 .version_id = 2,
2915 .minimum_version_id = 1,
2916 .needed = ide_error_needed,
2917 .fields = (VMStateField[]) {
2918 VMSTATE_INT32(error_status, IDEBus),
2919 VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2920 VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2921 VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2922 VMSTATE_END_OF_LIST()
2923 }
2924 };
2925
/* Per-bus migration state: control register and selected unit, plus the
 * retry-error subsection when a failed request is pending. */
const VMStateDescription vmstate_ide_bus = {
    .name = "ide_bus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, IDEBus),
        VMSTATE_UINT8(unit, IDEBus),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_error_status,
        NULL
    }
};
2940
2941 void ide_drive_get(DriveInfo **hd, int n)
2942 {
2943 int i;
2944
2945 for (i = 0; i < n; i++) {
2946 hd[i] = drive_get_by_index(IF_IDE, i);
2947 }
2948 }