/*
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	int ret = 1;

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);
		if (!list_empty(&rq->queuelist))
			blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq, uptodate);
		ret = 0;
	}

	return ret;
}
/**
 *	ide_end_request		-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@nr_sectors: number of sectors completed
 *
 *	This is our end_request wrapper function. We complete the I/O
 *	update random number input and dequeue the request, which if
 *	it was tagged may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	/*
	 * room for locking improvements here, the calls below don't
	 * need the queue lock held at all
	 */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;

	if (!nr_sectors)
		nr_sectors = rq->hard_cur_sectors;

	ret = __ide_end_request(drive, rq, uptodate, nr_sectors);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}

EXPORT_SYMBOL(ide_end_request);
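/*
 * Illustrative sketch (not part of this file): a sub-driver's end_request
 * hook typically just forwards to ide_end_request() above. The name
 * example_end_request is hypothetical.
 *
 *	static int example_end_request(ide_drive_t *drive, int uptodate,
 *				       int nr_sects)
 *	{
 *		return ide_end_request(drive, uptodate, nr_sects);
 *	}
 */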
/*
 * Power Management state machine. This one is rather trivial for now,
 * we should probably add more, like switching back to PIO on suspend
 * to help some BIOSes, re-do the door locking on resume, etc...
 */

enum {
	ide_pm_flush_cache	= ide_pm_state_start_suspend,
	idedisk_pm_standby,

	idedisk_pm_idle		= ide_pm_state_start_resume,
	ide_pm_restore_dma,
};

static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) complete */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = ide_pm_state_completed;
		else
			pm->pm_step = idedisk_pm_standby;
		break;
	case idedisk_pm_standby:	/* Suspend step 2 (standby) complete */
		pm->pm_step = ide_pm_state_completed;
		break;
	case idedisk_pm_idle:		/* Resume step 1 (idle) complete */
		pm->pm_step = ide_pm_restore_dma;
		break;
	}
}
static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	if (drive->media != ide_disk) {
		/* skip idedisk_pm_idle for ATAPI devices */
		if (pm->pm_step == idedisk_pm_idle)
			pm->pm_step = ide_pm_restore_dma;
	}

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ide_id_has_flush_cache_ext(drive->id))
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT;
		else
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler	   = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_standby:	/* Suspend step 2 (standby) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler	   = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_idle:		/* Resume step 1 (idle) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler = task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case ide_pm_restore_dma:	/* Resume step 2 (restore DMA) */
		/*
		 * Right now, all we do is call hwif->ide_dma_check(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if ((drive->id->capability & 1) == 0)
			break;
		if (drive->hwif->ide_dma_check == NULL)
			break;
		drive->hwif->ide_dma_check(drive);
		break;
	}
	pm->pm_step = ide_pm_state_completed;
	return ide_stopped;
}
/**
 *	ide_end_dequeued_request	-	complete an IDE I/O
 *	@drive: IDE device for the I/O
 *	@nr_sectors: number of sectors completed
 *
 *	Complete an I/O that is no longer on the request queue. This
 *	typically occurs when we pull the request and issue a REQUEST_SENSE.
 *	We must still finish the old request but we must not tamper with the
 *	queue in the meantime.
 *
 *	NOTE: This path does not handle barrier, but barrier is not supported
 *	on ide-cd anyway.
 */

int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&ide_lock, flags);

	BUG_ON(!blk_rq_started(rq));

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);
		if (blk_rq_tagged(rq))
			blk_queue_end_tag(drive->queue, rq);
		end_that_request_last(rq, uptodate);
		ret = 0;
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
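/*
 * Illustrative sketch (not part of this file): a caller that pulled a
 * failed request off the queue, e.g. to issue a REQUEST_SENSE, would later
 * finish the original request with ide_end_dequeued_request(). The name
 * failed_rq is hypothetical.
 *
 *	ide_end_dequeued_request(drive, failed_rq, 0,
 *				 failed_rq->hard_nr_sectors);
 */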
/**
 *	ide_complete_pm_request - end the current Power Management request
 *	@drive: target drive
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq, 1);
	spin_unlock_irqrestore(&ide_lock, flags);
}
/*
 * FIXME: probably move this somewhere else, name is bad too :)
 */
u64 ide_get_error_location(ide_drive_t *drive, char *args)
{
	u32 high, low;
	u8 hcyl, lcyl, sect;
	u64 sector;

	high = 0;
	low = 0;

	hcyl = args[5];
	lcyl = args[4];
	sect = args[3];

	if (ide_id_has_flush_cache_ext(drive->id)) {
		low = (hcyl << 16) | (lcyl << 8) | sect;
		HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
		high = ide_read_24(drive);
	} else {
		u8 cur = HWIF(drive)->INB(IDE_SELECT_REG);
		if (cur & 0x40) {
			high = cur & 0xf;
			low = (hcyl << 16) | (lcyl << 8) | sect;
		} else {
			low = hcyl * drive->head * drive->sect;
			low += lcyl * drive->sect;
			low += sect - 1;
		}
	}

	sector = ((u64) high << 24) | low;
	return sector;
}

EXPORT_SYMBOL(ide_get_error_location);
/**
 *	ide_end_drive_cmd	-	end an explicit drive command
 *
 *	Clean up after success/failure of an explicit drive command.
 *	These get thrown onto the queue so they are synchronized with
 *	real I/O operations on the drive.
 *
 *	In LBA48 mode we have to read the register set twice to get
 *	all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
		}
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
			args[3] = hwif->INB(IDE_SECTOR_REG);
			args[4] = hwif->INB(IDE_LCYL_REG);
			args[5] = hwif->INB(IDE_HCYL_REG);
			args[6] = hwif->INB(IDE_SELECT_REG);
		}
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = (ide_task_t *) rq->special;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);

		if (args) {
			if (args->tf_in_flags.b.data) {
				u16 data = hwif->INW(IDE_DATA_REG);
				args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF;
				args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF;
			}
			args->tfRegister[IDE_ERROR_OFFSET]   = err;
			/* be sure we're looking at the low order bits */
			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
			args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
			args->tfRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
			args->tfRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
			args->tfRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
			args->tfRegister[IDE_SELECT_OFFSET]  = hwif->INB(IDE_SELECT_REG);
			args->tfRegister[IDE_STATUS_OFFSET]  = stat;

			if (drive->addressing == 1) {
				hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
				args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG);
				args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
				args->hobRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
				args->hobRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
				args->hobRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
			}
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
			drive->name, rq->pm->pm_step, stat, err);
#endif
		ide_complete_power_step(drive, rq, stat, err);
		if (pm->pm_step == ide_pm_state_completed)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	end_that_request_last(rq, !rq->errors);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);
/**
 *	try_to_flush_leftover_data	-	flush junk
 *	@drive: drive to flush
 *
 *	try_to_flush_leftover_data() is invoked in response to a drive
 *	unexpectedly having its DRQ_STAT bit set. As an alternative to
 *	resetting the drive, this routine tries to clear the condition
 *	by reading a sector's worth of data from the drive. Of course,
 *	this may not help if the drive is *waiting* for data from *us*.
 */
static void try_to_flush_leftover_data (ide_drive_t *drive)
{
	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;

	if (drive->media != ide_disk)
		return;
	while (i > 0) {
		u32 buffer[16];
		u32 wcount = (i > 16) ? 16 : i;

		i -= wcount;
		HWIF(drive)->ata_input_data(drive, buffer, wcount);
	}
}
static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}
static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ERR_STAT) {
		/* err has different meaning on cdrom and tape */
		if (err == ABRT_ERR) {
			if (drive->select.b.lba &&
			    /* some newer drives don't support WIN_SPECIFY */
			    hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (BBD_ERR | ECC_ERR)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & TRK0_ERR) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && hwif->err_stops_fifo == 0)
		try_to_flush_leftover_data(drive);

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		/* force an abort */
		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq))
		ide_kill_rq(drive, rq);
	else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
			drive->special.b.recalibrate = 1;
		++rq->errors;
	}
	return ide_started;
}
static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		/* force an abort */
		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_started;
}
ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(__ide_error);
/**
 *	ide_error	-	handle an error on the IDE
 *	@drive: drive the error occurred on
 *	@msg: message to report
 *	@stat: status bits
 *
 *	ide_error() takes action based on the error returned by the drive.
 *	For normal I/O that may well include retries. We deal with
 *	both new-style (taskfile) and old style command handling here.
 *	In the case of taskfile command handling there is work left to
 *	do, for legacy commands we just drop the request onto the floor.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);
ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
{
	if (drive->media != ide_disk)
		rq->errors |= ERROR_RESET;

	ide_kill_rq(drive, rq);

	return ide_stopped;
}

EXPORT_SYMBOL_GPL(__ide_abort);
/**
 *	ide_abort	-	abort pending IDE operations
 *	@drive: drive the error occurred on
 *	@msg: message to report
 *
 *	ide_abort kills and cleans up when we are about to do a
 *	host initiated reset on active commands. Longer term we
 *	want handlers to have sensible abort handling themselves
 *
 *	This differs fundamentally from ide_error because in
 *	this case the command is doing just fine when we
 *	blow it away.
 */

ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
{
	struct request *rq;

	if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (!blk_fs_request(rq)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, BUSY_STAT, 0);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->abort(drive, rq);
	} else
		return __ide_abort(drive, rq);
}
/**
 *	ide_cmd		-	issue a simple drive command
 *	@drive: drive the command is for
 *	@cmd: command byte
 *	@nsect: sector byte
 *	@handler: handler for the command completion
 *
 *	Issue a simple drive command with interrupts.
 *	The drive must be selected beforehand.
 */

static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
		ide_handler_t *handler)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);	/* clear nIEN */
	SELECT_MASK(drive,0);
	hwif->OUTB(nsect,IDE_NSECTOR_REG);
	ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
}
/**
 *	drive_cmd_intr		-	drive command completion interrupt
 *	@drive: drive the completion interrupt occurred on
 *
 *	drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
 *	We do any necessary data reading and then wait for the drive to
 *	go non busy. At that point we may read the error data and complete
 *	the request.
 */

static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	ide_hwif_t *hwif = HWIF(drive);
	u8 *args = (u8 *) rq->buffer;
	u8 stat = hwif->INB(IDE_STATUS_REG);
	int retries = 10;

	local_irq_enable_in_hardirq();
	if ((stat & DRQ_STAT) && args && args[3]) {
		u8 io_32bit = drive->io_32bit;
		drive->io_32bit = 0;
		hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
		drive->io_32bit = io_32bit;
		while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
			udelay(100);
	}

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "drive_cmd", stat);
		/* calls ide_end_drive_cmd */
	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}
static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_SECTOR_OFFSET]  = drive->sect;
	task->tfRegister[IDE_LCYL_OFFSET]    = drive->cyl;
	task->tfRegister[IDE_HCYL_OFFSET]    = drive->cyl>>8;
	task->tfRegister[IDE_SELECT_OFFSET]  = ((drive->head-1)|drive->select.all)&0xBF;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY;

	task->handler = &set_geometry_intr;
}
static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE;

	task->handler = &recal_intr;
}
static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT;

	task->handler = &set_multmode_intr;
}
static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.command_type = IDE_DRIVE_TASK_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_init_specify_cmd(drive, &args);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_init_restore_cmd(drive, &args);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		if (drive->mult_req > drive->id->max_multsect)
			drive->mult_req = drive->id->max_multsect;
		ide_init_setmult_cmd(drive, &args);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	do_rw_taskfile(drive, &args);

	return ide_started;
}
/**
 *	do_special		-	issue some special commands
 *	@drive: drive the command is for
 *
 *	do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
 *	commands to a drive.  It used to do much more, but has been scaled
 *	down.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		s->b.set_tune = 0;
		if (HWIF(drive)->tuneproc != NULL)
			HWIF(drive)->tuneproc(drive, drive->tune_req);
		return ide_started;
	} else {
		if (drive->media == ide_disk)
			return ide_disk_special(drive);

		s->all = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}
}
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (hwif->sg_mapped)	/* needed by ide-scsi */
		return;

	if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);
void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg = hwif->cursg_ofs = 0;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
/**
 *	execute_drive_command	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do this as that is due to change
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *args = rq->special;

		if (!args)
			goto done;

		hwif->data_phase = args->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		if (args->tf_out_flags.all != 0)
			return flagged_taskfile(drive, args);
		return do_rw_taskfile(drive, args);
	} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
		u8 *args = rq->buffer;
		u8 sel;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_TASK_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("fr=0x%02x ", args[1]);
		printk("ns=0x%02x ", args[2]);
		printk("sc=0x%02x ", args[3]);
		printk("lcyl=0x%02x ", args[4]);
		printk("hcyl=0x%02x ", args[5]);
		printk("sel=0x%02x\n", args[6]);
#endif
		hwif->OUTB(args[1], IDE_FEATURE_REG);
		hwif->OUTB(args[3], IDE_SECTOR_REG);
		hwif->OUTB(args[4], IDE_LCYL_REG);
		hwif->OUTB(args[5], IDE_HCYL_REG);
		sel = (args[6] & ~0x10);
		if (drive->select.b.unit)
			sel |= 0x10;
		hwif->OUTB(sel, IDE_SELECT_REG);
		ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
		return ide_started;
	} else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
		u8 *args = rq->buffer;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("sc=0x%02x ", args[1]);
		printk("fr=0x%02x ", args[2]);
		printk("xx=0x%02x\n", args[3]);
#endif
		if (args[0] == WIN_SMART) {
			hwif->OUTB(0x4f, IDE_LCYL_REG);
			hwif->OUTB(0xc2, IDE_HCYL_REG);
			hwif->OUTB(args[2],IDE_FEATURE_REG);
			hwif->OUTB(args[1],IDE_SECTOR_REG);
			ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
			return ide_started;
		}
		hwif->OUTB(args[2],IDE_FEATURE_REG);
		ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
		return ide_started;
	}

done:
	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive,
			hwif->INB(IDE_STATUS_REG),
			hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}
static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == ide_pm_state_start_suspend)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->blocked = 1;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == ide_pm_state_start_resume) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(HWIF(drive), 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
		rc = ide_wait_not_busy(HWIF(drive), 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}
/**
 *	start_request	-	start of I/O and command issuing for IDE
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests. It also does
 *	the final remapping for weird stuff like EZDrive. Once
 *	device mapper can work sector level the EZDrive stuff can go away
 *
 *	FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;
	sector_t block;

	BUG_ON(!blk_rq_started(rq));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		goto kill_rq;
	}

	block    = rq->sector;
	if (blk_fs_request(rq) &&
	    (drive->media == ide_disk || drive->media == ide_floppy)) {
		block += drive->sect0;
	}
	/* Yecch - this will shift the entire interval,
	   possibly killing some innocent following sector */
	if (block == 0 && drive->remap_0_to_1 == 1)
		block = 1;  /* redirect MBR access to EZ-Drive partn table */

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
		    rq->cmd_type == REQ_TYPE_ATA_TASK ||
		    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, rq->pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == ide_pm_state_completed)
				ide_complete_pm_request(drive, rq);
			return startstop;
		}

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->do_request(drive, rq, block);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}
/**
 *	ide_stall_queue		-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->sleeping = 1;
}

EXPORT_SYMBOL(ide_stall_queue);
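/*
 * Illustrative sketch (not part of this file): a sub-driver that wants to
 * give the other device on the cable some time can simply back off; the
 * 10 * WAIT_MIN_SLEEP value here is just an example.
 *
 *	ide_stall_queue(drive, 10 * WAIT_MIN_SLEEP);
 */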
#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)
/**
 *	choose_drive		-	select a drive to service
 *	@hwgroup: hardware group to select on
 *
 *	choose_drive() selects the next drive which will be serviced.
 *	This is necessary because the IDE layer can't issue commands
 *	to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep))
		    && !elv_queue_empty(drive->queue)) {
			if (!best
			 || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep)))
			 || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best))))
			{
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);
	if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
		/*
		 * We *may* have some time to spare, but first let's see if
		 * someone can potentially benefit from our nice mood today..
		 */
			drive = best->next;
			do {
				if (!drive->sleeping
				 && time_before(jiffies - best->service_time, WAKEUP(drive))
				 && time_before(WAKEUP(drive), jiffies + t))
				{
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}
/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces.  Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
 * may have both interfaces in a single hwgroup to "serialize" access.
 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
 * together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices.  This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately.  Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue.  If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver.  This makes the driver much more friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t	*drive;
	ide_hwif_t	*hwif;
	struct request	*rq;
	ide_startstop_t	startstop;
	int		loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
		/*
		 * Take a short snooze, and then wake up this hwgroup again.
		 * This gives other hwgroups on the same a chance to
		 * play fairly with us, just in case there are big differences
		 * in relative throughputs.. don't want to hog the cpu too much.
		 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1
				 * while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock
				 * otherwise? perhaps from tq_disk?
				 */

				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq &&
		    hwif != hwgroup->hwif &&
		    hwif->io_ports[IDE_CONTROL_OFFSET]) {
			/* set nIEN for previous hwif */
			SELECT_INTERRUPT(drive);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleeping = 0;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above to return us whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 *
		 * We count how many times we loop here to make sure we service
		 * all drives in the hwgroup without looping for ever
		 */
		if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* We clear busy, there should be no pending ATA command at this point. */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up.  So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 *  -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable_in_hardirq();
			/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}
/*
 * Passes the stuff to ide_do_request
 */
void do_ide_request(request_queue_t *q)
{
	ide_drive_t *drive = q->queuedata;

	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}
/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)HWIF(drive)->ide_dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
						hwif->INB(IDE_STATUS_REG));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		(void) hwif->ide_dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->retry_pio++;
	drive->state = DMA_PIO_RETRY;
	(void) hwif->ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure request is sane
	 */
	rq = HWGROUP(drive)->rq;

	if (!rq)
		goto out;

	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}
/**
 *	ide_timer_expiry	-	handle lack of an IDE interrupt
 *	@data: timer callback magic (hwgroup)
 *
 *	An IDE command has timed out before the expected drive return
 *	occurred. At this point we attempt to clean up the current
 *	mess. If the current handler includes an expiry handler then
 *	we invoke the expiry handler, and providing it is happy the
 *	work is done. If that fails we apply generic recovery rules
 *	invoking the handler and checking the drive DMA status. We
 *	have an excessively incestuous relationship with the DMA
 *	logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t	*hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t	*handler;
	ide_expiry_t	*expiry;
	unsigned long	flags;
	unsigned long	wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if ((handler = hwgroup->handler) == NULL) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwgroup->timer.expires  = jiffies + wait;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&ide_lock);
			hwif = HWIF(drive);
#if DISABLE_IRQ_NOSYNC
			disable_irq_nosync(hwif->irq);
#else
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
#endif /* DISABLE_IRQ_NOSYNC */
			/* local CPU only,
			 * as if we were handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					(void) hwgroup->hwif->ide_dma_lostirq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}
/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwgroup: hwgroup being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt here.
 *
 *	On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *	the drive enters "idle", "standby", or "sleep" mode, so if the status
 *	looks "good", we just ignore the interrupt completely.
 *
 *	This routine assumes __cli() is in effect when called.
 *
 *	If an unexpected interrupt happens on irq15 while we are handling irq14
 *	and if the two interfaces are "serialized" (CMD640), then it looks like
 *	we could screw up by interfering with a new request being set up for
 *	irq15.
 *
 *	In reality, this is a non-issue.  The new command is not sent unless
 *	the drive is ready to accept one, in which case we know the drive is
 *	not trying to interrupt us.  And ide_set_handler() is always invoked
 *	before completing the issuance of any new drive command, so we will not
 *	be accidentally invoked as a result of any valid command completion
 *	interrupt.
 *
 *	Note that we must walk the entire hwgroup here. We know which hwif
 *	is doing the current command, but we don't know which hwif burped
 *	mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/*
	 * handle the unexpected interrupt
	 */
	do {
		if (hwif->irq == irq) {
			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
			if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
				/* Try to not flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}
/**
 *	ide_intr	-	default IDE interrupt handler
 *	@irq: interrupt number
 *	@dev_id: hwif group
 *	@regs: unused weirdness from the kernel irq layer
 *
 *	This is the default IRQ handler for the IDE layer. You should
 *	not need to override it. If you do be aware it is subtle in
 *	places.
 *
 *	hwgroup->hwif is the interface in the group currently performing
 *	a command. hwgroup->drive is the drive and hwgroup->handler is
 *	the IRQ handler to call. As we issue a command the handlers
 *	step through multiple states, reassigning the handler to the
 *	next step in the process. Unlike a smart SCSI controller IDE
 *	expects the main processor to sequence the various transfer
 *	stages. We also manage a poll timer to catch up with most
 *	timeout situations. There are still a few where the handlers
 *	don't ever decide to give up.
 *
 *	The handler eventually returns ide_stopped to indicate the
 *	request completed. At this point we issue the next request
 *	on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->pci_dev && !hwif->pci_dev->vendor)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device.  Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date.  Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	if (drive->unmask)
		local_irq_enable_in_hardirq();
	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);
	spin_lock_irq(&ide_lock);

	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}
/**
 *	ide_init_drive_cmd	-	initialize a drive command request
 *	@rq: request object
 *
 *	Initialize a request before we fill it in and send it down to
 *	ide_do_drive_cmd. Commands must be set up by this function. Right
 *	now it doesn't do a lot, but if that changes abusers will have a
 *	nasty surprise.
 */

void ide_init_drive_cmd (struct request *rq)
{
	memset(rq, 0, sizeof(*rq));
	rq->cmd_type = REQ_TYPE_ATA_CMD;
	rq->ref_count = 1;
}

EXPORT_SYMBOL(ide_init_drive_cmd);
/**
 *	ide_do_drive_cmd	-	issue IDE special command
 *	@drive: device to issue command
 *	@rq: request to issue
 *	@action: action for processing
 *
 *	This function issues a special IDE device request
 *	onto the request queue.
 *
 *	If action is ide_wait, then the rq is queued at the end of the
 *	request queue, and the function sleeps until it has been processed.
 *	This is for use when invoked from an ioctl handler.
 *
 *	If action is ide_preempt, then the rq is queued at the head of
 *	the request queue, displacing the currently-being-processed
 *	request and this function returns immediately without waiting
 *	for the new rq to be completed.  This is VERY DANGEROUS, and is
 *	intended for careful use by the ATAPI tape/cdrom driver code.
 *
 *	If action is ide_end, then the rq is queued at the end of the
 *	request queue, and the function returns immediately without waiting
 *	for the new rq to be completed. This is again intended for careful
 *	use by the ATAPI tape/cdrom driver code.
 */

int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	DECLARE_COMPLETION_ONSTACK(wait);
	int where = ELEVATOR_INSERT_BACK, err;
	int must_wait = (action == ide_wait || action == ide_head_wait);

	rq->errors = 0;

	/*
	 * we need to hold an extra reference to request for safe inspection
	 * after completion
	 */
	if (must_wait) {
		rq->ref_count++;
		rq->end_io_data = &wait;
		rq->end_io = blk_end_sync_rq;
	}

	spin_lock_irqsave(&ide_lock, flags);
	if (action == ide_preempt)
		hwgroup->rq = NULL;
	if (action == ide_preempt || action == ide_head_wait) {
		where = ELEVATOR_INSERT_FRONT;
		rq->cmd_flags |= REQ_PREEMPT;
	}
	__elv_add_request(drive->queue, rq, where, 0);
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);

	err = 0;
	if (must_wait) {
		wait_for_completion(&wait);
		if (rq->errors)
			err = -EIO;

		blk_put_request(rq);
	}

	return err;
}

EXPORT_SYMBOL(ide_do_drive_cmd);
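/*
 * Illustrative sketch (not part of this file): issuing a simple drive
 * command from process context and waiting for it, roughly as an ioctl
 * handler would. The four-byte layout (command, sector count, feature,
 * number of sectors to read back) follows the REQ_TYPE_ATA_CMD handling in
 * execute_drive_cmd() above; the buf/rq names are hypothetical.
 *
 *	u8 buf[4] = { WIN_IDLEIMMEDIATE, 0, 0, 0 };
 *	struct request rq;
 *
 *	ide_init_drive_cmd(&rq);
 *	rq.buffer = buf;
 *	(void) ide_do_drive_cmd(drive, &rq, ide_wait);
 */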