/*
 * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003 Red Hat
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/bitops.h>
#include <linux/nmi.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

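/*
 * Select the drive on its channel: run the host's ->selectproc hook (if
 * any), then write the device register through a taskfile load.
 */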
void SELECT_DRIVE (ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_port_ops *port_ops = hwif->port_ops;
        ide_task_t task;

        if (port_ops && port_ops->selectproc)
                port_ops->selectproc(drive);

        memset(&task, 0, sizeof(task));
        task.tf_flags = IDE_TFLAG_OUT_DEVICE;

        drive->hwif->tp_ops->tf_load(drive, &task);
}

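/*
 * Ask the host driver to mask or unmask the drive's interrupt via its
 * ->maskproc hook, if one is provided.
 */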
void SELECT_MASK(ide_drive_t *drive, int mask)
{
        const struct ide_port_ops *port_ops = drive->hwif->port_ops;

        if (port_ops && port_ops->maskproc)
                port_ops->maskproc(drive, mask);
}

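/* Read the error register through a taskfile read and return its value. */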
u8 ide_read_error(ide_drive_t *drive)
{
        ide_task_t task;

        memset(&task, 0, sizeof(task));
        task.tf_flags = IDE_TFLAG_IN_FEATURE;

        drive->hwif->tp_ops->tf_read(drive, &task);

        return task.tf.error;
}
EXPORT_SYMBOL_GPL(ide_read_error);

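/*
 * Convert the 256 little-endian words of raw IDENTIFY data to host byte
 * order (a no-op on little-endian machines).
 */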
void ide_fix_driveid(u16 *id)
{
#ifndef __LITTLE_ENDIAN
# ifdef __BIG_ENDIAN
        int i;

        for (i = 0; i < 256; i++)
                id[i] = __le16_to_cpu(id[i]);
# else
#  error "Please fix <asm/byteorder.h>"
# endif
#endif
}

/*
 * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
 * removing leading/trailing blanks and compressing internal blanks.
 * It is primarily used to tidy up the model name/number fields as
 * returned by the ATA_CMD_ID_ATA[PI] commands.
 */

void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
{
        u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */

        if (byteswap) {
                /* convert from big-endian to host byte order */
                for (p = s ; p != end ; p += 2)
                        be16_to_cpus((u16 *) p);
        }

        /* strip leading blanks */
        p = s;
        while (s != end && *s == ' ')
                ++s;
        /* compress internal blanks and strip trailing blanks */
        while (s != end && *s) {
                if (*s++ != ' ' || (s != end && *s && *s != ' '))
                        *p++ = *(s-1);
        }
        /* wipe out trailing garbage */
        while (p != end)
                *p++ = '\0';
}

EXPORT_SYMBOL(ide_fixstring);

/*
 * This routine busy-waits for the drive status to be not "busy".
 * It then checks the status for all of the "good" bits and none
 * of the "bad" bits, and if all is okay it returns 0.  All other
 * cases return an error -- the caller may then invoke ide_error().
 *
 * This routine should get fixed to not hog the cpu during extra long waits.
 * That could be done by busy-waiting for the first jiffy or two, and then
 * setting a timer to wake up at half-second intervals until the timeout
 * expires.
 */
static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
{
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        unsigned long flags;
        int i;
        u8 stat;

        udelay(1);      /* spec allows drive 400ns to assert "BUSY" */
        stat = tp_ops->read_status(hwif);

        if (stat & ATA_BUSY) {
                local_save_flags(flags);
                local_irq_enable_in_hardirq();
                timeout += jiffies;
                while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
                        if (time_after(jiffies, timeout)) {
                                /*
                                 * One last read after the timeout in case
                                 * heavy interrupt load made us not make any
                                 * progress during the timeout..
                                 */
                                stat = tp_ops->read_status(hwif);
                                if ((stat & ATA_BUSY) == 0)
                                        break;

                                local_irq_restore(flags);
                                *rstat = stat;
                                return -EBUSY;
                        }
                }
                local_irq_restore(flags);
        }
        /*
         * Allow status to settle, then read it again.
         * A few rare drives vastly violate the 400ns spec here,
         * so we'll wait up to 10usec for a "good" status
         * rather than expensively fail things immediately.
         * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
         */
        for (i = 0; i < 10; i++) {
                udelay(1);
                stat = tp_ops->read_status(hwif);

                if (OK_STAT(stat, good, bad)) {
                        *rstat = stat;
                        return 0;
                }
        }
        *rstat = stat;
        return -EFAULT;
}

/*
 * On error, this returns the error value after doing "*startstop = ide_error()".
 * The caller should then return the updated value of "startstop";
 * "startstop" is left unchanged when the function returns 0.
 */
int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
{
        int err;
        u8 stat;

        /* bail early if we've exceeded max_failures */
        if (drive->max_failures && (drive->failures > drive->max_failures)) {
                *startstop = ide_stopped;
                return 1;
        }

        err = __ide_wait_stat(drive, good, bad, timeout, &stat);

        if (err) {
                char *s = (err == -EBUSY) ? "status timeout" : "status error";
                *startstop = ide_error(drive, s, stat);
        }

        return err;
}

EXPORT_SYMBOL(ide_wait_stat);

/**
 * ide_in_drive_list - look for drive in black/white list
 * @id: drive identifier
 * @table: list to inspect
 *
 * Look for a drive in the blacklist and the whitelist tables.
 * Returns 1 if the drive is found in the table.
 */

int ide_in_drive_list(u16 *id, const struct drive_list_entry *table)
{
        for ( ; table->id_model; table++)
                if ((!strcmp(table->id_model, (char *)&id[ATA_ID_PROD])) &&
                    (!table->id_firmware ||
                     strstr((char *)&id[ATA_ID_FW_REV], table->id_firmware)))
                        return 1;
        return 0;
}

EXPORT_SYMBOL_GPL(ide_in_drive_list);

/*
 * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
 * We list them here and depend on the device-side cable detection for them.
 *
 * Some optical devices with buggy firmware have the same problem.
 */
static const struct drive_list_entry ivb_list[] = {
        { "QUANTUM FIREBALLlct10 05" , "A03.0900" },
        { "TSSTcorp CDDVDW SH-S202J" , "SB00" },
        { "TSSTcorp CDDVDW SH-S202J" , "SB01" },
        { "TSSTcorp CDDVDW SH-S202N" , "SB00" },
        { "TSSTcorp CDDVDW SH-S202N" , "SB01" },
        { "TSSTcorp CDDVDW SH-S202H" , "SB00" },
        { "TSSTcorp CDDVDW SH-S202H" , "SB01" },
        { "SAMSUNG SP0822N" , "WA100-10" },
        { NULL , NULL }
};

/*
 * Every host that uses the 80c ribbon must use this check.
 * The name is derived from the upper byte of word 93 and the 80c ribbon.
 */
u8 eighty_ninty_three (ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u16 *id = drive->id;
        int ivb = ide_in_drive_list(id, ivb_list);

        if (hwif->cbl == ATA_CBL_PATA40_SHORT)
                return 1;

        if (ivb)
                printk(KERN_DEBUG "%s: skipping word 93 validity check\n",
                       drive->name);

        if (ata_id_is_sata(id) && !ivb)
                return 1;

        if (hwif->cbl != ATA_CBL_PATA80 && !ivb)
                goto no_80w;

        /*
         * FIXME:
         * - change master/slave IDENTIFY order
         * - force bit13 (80c cable present) check also for !ivb devices
         *   (unless the slave device is pre-ATA3)
         */
        if ((id[ATA_ID_HW_CONFIG] & 0x4000) ||
            (ivb && (id[ATA_ID_HW_CONFIG] & 0x2000)))
                return 1;

no_80w:
        if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
                return 0;

        printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
               "limiting max speed to UDMA33\n",
               drive->name,
               hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");

        drive->dev_flags |= IDE_DFLAG_UDMA33_WARNED;

        return 0;
}

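/*
 * Re-issue IDENTIFY and refresh the cached DMA mode words in drive->id
 * (e.g. after a transfer mode change); returns 1 on success, 0 on failure.
 */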
int ide_driveid_update(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        u16 *id;
        unsigned long flags;
        u8 stat;

        /*
         * Re-read drive->id for possible DMA mode
         * change (copied from ide-probe.c)
         */

        SELECT_MASK(drive, 1);
        tp_ops->set_irq(hwif, 0);
        msleep(50);
        tp_ops->exec_command(hwif, ATA_CMD_ID_ATA);

        if (ide_busy_sleep(hwif, WAIT_WORSTCASE, 1)) {
                SELECT_MASK(drive, 0);
                return 0;
        }

        msleep(50);     /* wait for IRQ and ATA_DRQ */
        stat = tp_ops->read_status(hwif);

        if (!OK_STAT(stat, ATA_DRQ, BAD_R_STAT)) {
                SELECT_MASK(drive, 0);
                printk("%s: CHECK for good STATUS\n", drive->name);
                return 0;
        }
        local_irq_save(flags);
        SELECT_MASK(drive, 0);
        id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
        if (!id) {
                local_irq_restore(flags);
                return 0;
        }
        tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
        (void)tp_ops->read_status(hwif);        /* clear drive IRQ */
        local_irq_enable();
        local_irq_restore(flags);
        ide_fix_driveid(id);

        drive->id[ATA_ID_UDMA_MODES]  = id[ATA_ID_UDMA_MODES];
        drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES];
        drive->id[ATA_ID_SWDMA_MODES] = id[ATA_ID_SWDMA_MODES];
        /* anything more ? */

        kfree(id);

        if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive))
                ide_dma_off(drive);

        return 1;
}

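/*
 * Program a new transfer mode on the drive with the SET FEATURES / set
 * transfer mode subcommand and update the cached mode words accordingly.
 */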
int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
{
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        u16 *id = drive->id, i;
        int error = 0;
        u8 stat;
        ide_task_t task;

#ifdef CONFIG_BLK_DEV_IDEDMA
        if (hwif->dma_ops)      /* check if host supports DMA */
                hwif->dma_ops->dma_host_set(drive, 0);
#endif

        /* Skip setting PIO flow-control modes on pre-EIDE drives */
        if ((speed & 0xf8) == XFER_PIO_0 && ata_id_has_iordy(drive->id) == 0)
                goto skip;

        /*
         * Don't use ide_wait_cmd here - it will
         * attempt to set_geometry and recalibrate,
         * but for some reason these don't work at
         * this point (lost interrupt).
         */

        /*
         * FIXME: we race against the running IRQ here if
         * this is called from non IRQ context. If we use
         * disable_irq() we hang on the error path. Work
         * is needed.
         */
        disable_irq_nosync(hwif->irq);

        udelay(1);
        SELECT_DRIVE(drive);
        SELECT_MASK(drive, 1);
        udelay(1);
        tp_ops->set_irq(hwif, 0);

        memset(&task, 0, sizeof(task));
        task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT;
        task.tf.feature = SETFEATURES_XFER;
        task.tf.nsect = speed;

        tp_ops->tf_load(drive, &task);

        tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);

        if (drive->quirk_list == 2)
                tp_ops->set_irq(hwif, 1);

        error = __ide_wait_stat(drive, drive->ready_stat,
                                ATA_BUSY | ATA_DRQ | ATA_ERR,
                                WAIT_CMD, &stat);

        SELECT_MASK(drive, 0);

        enable_irq(hwif->irq);

        if (error) {
                (void) ide_dump_status(drive, "set_drive_speed_status", stat);
                return error;
        }

        id[ATA_ID_UDMA_MODES]  &= ~0xFF00;
        id[ATA_ID_MWDMA_MODES] &= ~0x0F00;
        id[ATA_ID_SWDMA_MODES] &= ~0x0F00;

skip:
#ifdef CONFIG_BLK_DEV_IDEDMA
        if (speed >= XFER_SW_DMA_0 && (drive->dev_flags & IDE_DFLAG_USING_DMA))
                hwif->dma_ops->dma_host_set(drive, 1);
        else if (hwif->dma_ops) /* check if host supports DMA */
                ide_dma_off_quietly(drive);
#endif

        if (speed >= XFER_UDMA_0) {
                i = 1 << (speed - XFER_UDMA_0);
                id[ATA_ID_UDMA_MODES] |= (i << 8 | i);
        } else if (speed >= XFER_MW_DMA_0) {
                i = 1 << (speed - XFER_MW_DMA_0);
                id[ATA_ID_MWDMA_MODES] |= (i << 8 | i);
        } else if (speed >= XFER_SW_DMA_0) {
                i = 1 << (speed - XFER_SW_DMA_0);
                id[ATA_ID_SWDMA_MODES] |= (i << 8 | i);
        }

        if (!drive->init_speed)
                drive->init_speed = speed;
        drive->current_speed = speed;
        return error;
}

/*
 * This should get invoked any time we exit the driver to
 * wait for an interrupt response from a drive.  handler() points
 * at the appropriate code to handle the next interrupt, and a
 * timer is started to prevent us from waiting forever in case
 * something goes wrong (see the ide_timer_expiry() handler later on).
 *
 * See also ide_execute_command
 */
static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
                               unsigned int timeout, ide_expiry_t *expiry)
{
        ide_hwif_t *hwif = drive->hwif;

        BUG_ON(hwif->handler);
        hwif->handler = handler;
        hwif->expiry = expiry;
        hwif->timer.expires = jiffies + timeout;
        hwif->req_gen_timer = hwif->req_gen;
        add_timer(&hwif->timer);
}

void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
                      unsigned int timeout, ide_expiry_t *expiry)
{
        ide_hwif_t *hwif = drive->hwif;
        unsigned long flags;

        spin_lock_irqsave(&hwif->lock, flags);
        __ide_set_handler(drive, handler, timeout, expiry);
        spin_unlock_irqrestore(&hwif->lock, flags);
}

EXPORT_SYMBOL(ide_set_handler);

/**
 * ide_execute_command - execute an IDE command
 * @drive: IDE drive to issue the command against
 * @command: command byte to write
 * @handler: handler for next phase
 * @timeout: timeout for command
 * @expiry: handler to run on timeout
 *
 * Helper function to issue an IDE command. This handles the
 * atomicity requirements, command timing and ensures that the
 * handler and IRQ setup do not race. All IDE command kick off
 * should go via this function or do equivalent locking.
 */

void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
                         unsigned timeout, ide_expiry_t *expiry)
{
        ide_hwif_t *hwif = drive->hwif;
        unsigned long flags;

        spin_lock_irqsave(&hwif->lock, flags);
        __ide_set_handler(drive, handler, timeout, expiry);
        hwif->tp_ops->exec_command(hwif, cmd);
        /*
         * The drive takes 400ns to respond; we must avoid the IRQ being
         * serviced before that.
         *
         * FIXME: we could skip this delay with care on non shared devices
         */
        ndelay(400);
        spin_unlock_irqrestore(&hwif->lock, flags);
}
EXPORT_SYMBOL(ide_execute_command);

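/* Issue the ATAPI PACKET command, honouring the 400ns post-command delay. */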
void ide_execute_pkt_cmd(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        unsigned long flags;

        spin_lock_irqsave(&hwif->lock, flags);
        hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET);
        ndelay(400);
        spin_unlock_irqrestore(&hwif->lock, flags);
}
EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);

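/* If a REQ_DRIVE_RESET special request is pending, complete it now. */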
static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
{
        struct request *rq = drive->hwif->rq;

        if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET)
                ide_end_request(drive, err ? err : 1, 0);
}

/* needed below */
static ide_startstop_t do_reset1 (ide_drive_t *, int);

/*
 * atapi_reset_pollfunc() gets invoked to poll the interface for completion
 * every 50ms during an atapi drive reset operation.  If the drive has not
 * yet responded, and we have not yet hit our maximum waiting time, then
 * the timer is restarted for another 50ms.
 */
static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 stat;

        SELECT_DRIVE(drive);
        udelay(10);
        stat = hwif->tp_ops->read_status(hwif);

        if (OK_STAT(stat, 0, ATA_BUSY))
                printk(KERN_INFO "%s: ATAPI reset complete\n", drive->name);
        else {
                if (time_before(jiffies, hwif->poll_timeout)) {
                        ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
                        /* continue polling */
                        return ide_started;
                }
                /* end of polling */
                hwif->polling = 0;
                printk(KERN_ERR "%s: ATAPI reset timed-out, status=0x%02x\n",
                       drive->name, stat);
                /* do it the old fashioned way */
                return do_reset1(drive, 1);
        }
        /* done polling */
        hwif->polling = 0;
        ide_complete_drive_reset(drive, 0);
        return ide_stopped;
}

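/* Decode and print the diagnostic code read back after a bus reset. */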
static void ide_reset_report_error(ide_hwif_t *hwif, u8 err)
{
        static const char *err_master_vals[] =
                { NULL, "passed", "formatter device error",
                  "sector buffer error", "ECC circuitry error",
                  "controlling MPU error" };

        u8 err_master = err & 0x7f;

        printk(KERN_ERR "%s: reset: master: ", hwif->name);
        if (err_master && err_master < 6)
                printk(KERN_CONT "%s", err_master_vals[err_master]);
        else
                printk(KERN_CONT "error (0x%02x?)", err);
        if (err & 0x80)
                printk(KERN_CONT "; slave: failed");
        printk(KERN_CONT "\n");
}

/*
 * reset_pollfunc() gets invoked to poll the interface for completion every
 * 50ms during an ide reset operation.  If the drives have not yet responded,
 * and we have not yet hit our maximum waiting time, then the timer is
 * restarted for another 50ms.
 */
static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_port_ops *port_ops = hwif->port_ops;
        u8 tmp;
        int err = 0;

        if (port_ops && port_ops->reset_poll) {
                err = port_ops->reset_poll(drive);
                if (err) {
                        printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
                               hwif->name, drive->name);
                        goto out;
                }
        }

        tmp = hwif->tp_ops->read_status(hwif);

        if (!OK_STAT(tmp, 0, ATA_BUSY)) {
                if (time_before(jiffies, hwif->poll_timeout)) {
                        ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
                        /* continue polling */
                        return ide_started;
                }
                printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
                       hwif->name, tmp);
                drive->failures++;
                err = -EIO;
        } else {
                tmp = ide_read_error(drive);

                if (tmp == 1) {
                        printk(KERN_INFO "%s: reset: success\n", hwif->name);
                        drive->failures = 0;
                } else {
                        ide_reset_report_error(hwif, tmp);
                        drive->failures++;
                        err = -EIO;
                }
        }
out:
        hwif->polling = 0;      /* done polling */
        ide_complete_drive_reset(drive, err);
        return ide_stopped;
}

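/*
 * Reset per-drive state before an interface reset: re-arm the "special"
 * setup commands (set_geometry/recalibrate on older drives, set_multmode
 * if needed) so they are reissued once the drive comes back.
 */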
static void ide_disk_pre_reset(ide_drive_t *drive)
{
        int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;

        drive->special.all = 0;
        drive->special.b.set_geometry = legacy;
        drive->special.b.recalibrate  = legacy;

        drive->mult_count = 0;
        drive->dev_flags &= ~IDE_DFLAG_PARKED;

        if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
            (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
                drive->mult_req = 0;

        if (drive->mult_req != drive->mult_count)
                drive->special.b.set_multmode = 1;
}

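/*
 * Per-drive preparation before resetting the interface: adjust DMA state,
 * honour IDE_DFLAG_KEEP_SETTINGS and give the host's ->pre_reset hook a
 * chance to run.
 */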
static void pre_reset(ide_drive_t *drive)
{
        const struct ide_port_ops *port_ops = drive->hwif->port_ops;

        if (drive->media == ide_disk)
                ide_disk_pre_reset(drive);
        else
                drive->dev_flags |= IDE_DFLAG_POST_RESET;

        if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
                if (drive->crc_count)
                        ide_check_dma_crc(drive);
                else
                        ide_dma_off(drive);
        }

        if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
                if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
                        drive->dev_flags &= ~IDE_DFLAG_UNMASK;
                        drive->io_32bit = 0;
                }
                return;
        }

        if (port_ops && port_ops->pre_reset)
                port_ops->pre_reset(drive);

        if (drive->current_speed != 0xff)
                drive->desired_speed = drive->current_speed;
        drive->current_speed = 0xff;
}

/*
 * do_reset1() attempts to recover a confused drive by resetting it.
 * Unfortunately, resetting a disk drive actually resets all devices on
 * the same interface, so it can really be thought of as resetting the
 * interface rather than resetting the drive.
 *
 * ATAPI devices have their own reset mechanism which allows them to be
 * individually reset without clobbering other devices on the same interface.
 *
 * Unfortunately, the IDE interface does not generate an interrupt to let
 * us know when the reset operation has finished, so we must poll for this.
 * Equally poor, though, is the fact that this may take a very long time
 * to complete (up to 30 seconds worst case).  So, instead of busy-waiting
 * here for it, we set a timer to poll at 50ms intervals.
 */
static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
{
        ide_hwif_t *hwif = drive->hwif;
        struct ide_io_ports *io_ports = &hwif->io_ports;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        const struct ide_port_ops *port_ops;
        ide_drive_t *tdrive;
        unsigned long flags, timeout;
        int i;
        DEFINE_WAIT(wait);

        spin_lock_irqsave(&hwif->lock, flags);

        /* We must not reset with running handlers */
        BUG_ON(hwif->handler != NULL);

        /* For an ATAPI device, first try an ATAPI SRST. */
        if (drive->media != ide_disk && !do_not_try_atapi) {
                pre_reset(drive);
                SELECT_DRIVE(drive);
                udelay(20);
                tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
                ndelay(400);
                hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
                hwif->polling = 1;
                __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
                spin_unlock_irqrestore(&hwif->lock, flags);
                return ide_started;
        }

        /* We must not disturb devices in the IDE_DFLAG_PARKED state. */
        do {
                unsigned long now;

                prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
                timeout = jiffies;
                ide_port_for_each_present_dev(i, tdrive, hwif) {
                        if ((tdrive->dev_flags & IDE_DFLAG_PARKED) &&
                            time_after(tdrive->sleep, timeout))
                                timeout = tdrive->sleep;
                }

                now = jiffies;
                if (time_before_eq(timeout, now))
                        break;

                spin_unlock_irqrestore(&hwif->lock, flags);
                timeout = schedule_timeout_uninterruptible(timeout - now);
                spin_lock_irqsave(&hwif->lock, flags);
        } while (timeout);
        finish_wait(&ide_park_wq, &wait);

        /*
         * First, reset any device state data we were maintaining
         * for any of the drives on this interface.
         */
        ide_port_for_each_dev(i, tdrive, hwif)
                pre_reset(tdrive);

        if (io_ports->ctl_addr == 0) {
                spin_unlock_irqrestore(&hwif->lock, flags);
                ide_complete_drive_reset(drive, -ENXIO);
                return ide_stopped;
        }

        /*
         * Note that we also set nIEN while resetting the device,
         * to mask unwanted interrupts from the interface during the reset.
         * However, due to the design of PC hardware, this will cause an
         * immediate interrupt due to the edge transition it produces.
         * This single interrupt gives us a "fast poll" for drives that
         * recover from reset very quickly, saving us the first 50ms wait time.
         *
         * TODO: add ->softreset method and stop abusing ->set_irq
         */
        /* set SRST and nIEN */
        tp_ops->set_irq(hwif, 4);
        /* more than enough time */
        udelay(10);
        /* clear SRST, leave nIEN (unless device is on the quirk list) */
        tp_ops->set_irq(hwif, drive->quirk_list == 2);
        /* more than enough time */
        udelay(10);
        hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
        hwif->polling = 1;
        __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);

        /*
         * Some weird controllers like resetting themselves to a strange
         * state when the disks are reset this way.  At least, the Winbond
         * 553 documentation says so.
         */
        port_ops = hwif->port_ops;
        if (port_ops && port_ops->resetproc)
                port_ops->resetproc(drive);

        spin_unlock_irqrestore(&hwif->lock, flags);
        return ide_started;
}

/*
 * ide_do_reset() is the entry point to the drive/interface reset code.
 */

ide_startstop_t ide_do_reset (ide_drive_t *drive)
{
        return do_reset1(drive, 0);
}

EXPORT_SYMBOL(ide_do_reset);

/*
 * ide_wait_not_busy() waits for the currently selected device on the hwif
 * to report a non-busy status, see comments in ide_probe_port().
 */
int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
{
        u8 stat = 0;

        while (timeout--) {
                /*
                 * Turn this into a schedule() sleep once I'm sure
                 * about locking issues (2.5 work ?).
                 */
                mdelay(1);
                stat = hwif->tp_ops->read_status(hwif);
                if ((stat & ATA_BUSY) == 0)
                        return 0;
                /*
                 * Assume a value of 0xff means nothing is connected to
                 * the interface and it doesn't implement the pull-down
                 * resistor on D7.
                 */
                if (stat == 0xff)
                        return -ENODEV;
                touch_softlockup_watchdog();
                touch_nmi_watchdog();
        }
        return -EBUSY;
}