/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */


#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/bitops.h>

static int __ide_end_request(ide_drive_t *drive, struct request *rq,
			     int uptodate, int nr_sectors)
{
	int ret = 1;

	BUG_ON(!(rq->flags & REQ_STARTED));

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;

	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);
		blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq, uptodate);
		ret = 0;
	}

	return ret;
}

/**
 * ide_end_request - complete an IDE I/O
 * @drive: IDE device for the I/O
 * @uptodate: request completion status (nonzero on success, 0 or a
 *	negative error code on failure)
 * @nr_sectors: number of sectors completed
 *
 * This is our end_request wrapper function. We complete the I/O,
 * update the random number input and dequeue the request, which, if
 * it was tagged, may be out of order.
 */

int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
{
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	/*
	 * room for locking improvements here, the calls below don't
	 * need the queue lock held at all
	 */
	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;

	if (!nr_sectors)
		nr_sectors = rq->hard_cur_sectors;

	ret = __ide_end_request(drive, rq, uptodate, nr_sectors);

	spin_unlock_irqrestore(&ide_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ide_end_request);
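
/*
 * A minimal usage sketch (added for clarity, not part of the original
 * file): this is roughly how a subdriver's data-transfer interrupt
 * handler completes the sector it has just moved. The handler name is
 * hypothetical; the return-value convention follows __ide_end_request()
 * above, which returns zero once the whole request has been completed
 * and dequeued.
 */
#if 0
static ide_startstop_t example_rw_intr(ide_drive_t *drive)
{
	/* one sector transferred successfully */
	if (!ide_end_request(drive, 1, 1))
		return ide_stopped;	/* request fully completed */
	/*
	 * a real handler would re-arm itself with ide_set_handler()
	 * before returning ide_started to wait for the next interrupt
	 */
	return ide_started;
}
#endif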

/*
 * Power Management state machine. This one is rather trivial for now,
 * we should probably add more, like switching back to PIO on suspend
 * to help some BIOSes, re-do the door locking on resume, etc...
 */

enum {
	ide_pm_flush_cache	= ide_pm_state_start_suspend,
	idedisk_pm_standby,

	idedisk_pm_idle		= ide_pm_state_start_resume,
	ide_pm_restore_dma,
};
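
/*
 * Step-sequence sketch, derived from ide_complete_power_step() and
 * ide_start_power_step() below (added here for clarity, not part of
 * the original file):
 *
 *   suspend: ide_pm_flush_cache -> idedisk_pm_standby -> completed
 *            (PM_EVENT_FREEZE skips the standby step)
 *   resume:  idedisk_pm_idle -> ide_pm_restore_dma -> completed
 *            (ATAPI devices skip the idle step)
 */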

static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
	struct request_pm_state *pm = rq->end_io_data;

	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) complete */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = ide_pm_state_completed;
		else
			pm->pm_step = idedisk_pm_standby;
		break;
	case idedisk_pm_standby:	/* Suspend step 2 (standby) complete */
		pm->pm_step = ide_pm_state_completed;
		break;
	case idedisk_pm_idle:		/* Resume step 1 (idle) complete */
		pm->pm_step = ide_pm_restore_dma;
		break;
	}
}

static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->end_io_data;
	ide_task_t *args = rq->special;

	memset(args, 0, sizeof(*args));

	if (drive->media != ide_disk) {
		/* skip idedisk_pm_idle for ATAPI devices */
		if (pm->pm_step == idedisk_pm_idle)
			pm->pm_step = ide_pm_restore_dma;
	}

	switch (pm->pm_step) {
	case ide_pm_flush_cache:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (!drive->wcache || !ide_id_has_flush_cache(drive->id)) {
			ide_complete_power_step(drive, rq, 0, 0);
			return ide_stopped;
		}
		if (ide_id_has_flush_cache_ext(drive->id))
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT;
		else
			args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_standby:	/* Suspend step 2 (standby) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler = &task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case idedisk_pm_idle:		/* Resume step 1 (idle) */
		args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE;
		args->command_type = IDE_DRIVE_TASK_NO_DATA;
		args->handler = task_no_data_intr;
		return do_rw_taskfile(drive, args);

	case ide_pm_restore_dma:	/* Resume step 2 (restore DMA) */
		/*
		 * Right now, all we do is call hwif->ide_dma_check(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if ((drive->id->capability & 1) == 0)
			break;
		if (drive->hwif->ide_dma_check == NULL)
			break;
		drive->hwif->ide_dma_check(drive);
		break;
	}
	pm->pm_step = ide_pm_state_completed;
	return ide_stopped;
}

/**
 * ide_complete_pm_request - end the current Power Management request
 * @drive: target drive
 * @rq: request
 *
 * This function cleans up the current PM request and stops the queue
 * if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq, 1);
	spin_unlock_irqrestore(&ide_lock, flags);
}

/*
 * FIXME: probably move this somewhere else, name is bad too :)
 */
u64 ide_get_error_location(ide_drive_t *drive, char *args)
{
	u32 high, low;
	u8 hcyl, lcyl, sect;
	u64 sector;

	high = 0;
	hcyl = args[5];
	lcyl = args[4];
	sect = args[3];

	if (ide_id_has_flush_cache_ext(drive->id)) {
		low = (hcyl << 16) | (lcyl << 8) | sect;
		HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
		high = ide_read_24(drive);
	} else {
		u8 cur = HWIF(drive)->INB(IDE_SELECT_REG);
		if (cur & 0x40) {
			high = cur & 0xf;
			low = (hcyl << 16) | (lcyl << 8) | sect;
		} else {
			low = hcyl * drive->head * drive->sect;
			low += lcyl * drive->sect;
			low += sect - 1;
		}
	}

	sector = ((u64) high << 24) | low;
	return sector;
}
EXPORT_SYMBOL(ide_get_error_location);
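
/*
 * Worked example for the arithmetic above (values purely illustrative,
 * added for clarity): for a drive with drive->head == 16 and
 * drive->sect == 63, the CHS branch with hcyl = 2, lcyl = 5, sect = 9
 * yields low = 2*16*63 + 5*63 + (9 - 1) = 2016 + 315 + 8 = 2339, and
 * with high == 0 the returned sector is simply 2339. In the LBA48 path
 * the result is instead (high << 24) | low, assembled from the raw
 * taskfile bytes.
 */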

/**
 * ide_end_drive_cmd - end an explicit drive command
 * @drive: the drive the command was issued to
 * @stat: status bits
 * @err: error bits
 *
 * Clean up after success/failure of an explicit drive command.
 * These get thrown onto the queue so they are synchronized with
 * real I/O operations on the drive.
 *
 * In LBA48 mode we have to read the register set twice to get
 * all the extra information out.
 */

void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	struct request *rq;

	spin_lock_irqsave(&ide_lock, flags);
	rq = HWGROUP(drive)->rq;
	spin_unlock_irqrestore(&ide_lock, flags);

	if (rq->flags & REQ_DRIVE_CMD) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
		}
	} else if (rq->flags & REQ_DRIVE_TASK) {
		u8 *args = (u8 *) rq->buffer;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (args) {
			args[0] = stat;
			args[1] = err;
			args[2] = hwif->INB(IDE_NSECTOR_REG);
			args[3] = hwif->INB(IDE_SECTOR_REG);
			args[4] = hwif->INB(IDE_LCYL_REG);
			args[5] = hwif->INB(IDE_HCYL_REG);
			args[6] = hwif->INB(IDE_SELECT_REG);
		}
	} else if (rq->flags & REQ_DRIVE_TASKFILE) {
		ide_task_t *args = (ide_task_t *) rq->special;
		if (rq->errors == 0)
			rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);

		if (args) {
			if (args->tf_in_flags.b.data) {
				u16 data = hwif->INW(IDE_DATA_REG);
				args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF;
				args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF;
			}
			args->tfRegister[IDE_ERROR_OFFSET] = err;
			/* be sure we're looking at the low order bits */
			hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
			args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
			args->tfRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG);
			args->tfRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG);
			args->tfRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG);
			args->tfRegister[IDE_SELECT_OFFSET] = hwif->INB(IDE_SELECT_REG);
			args->tfRegister[IDE_STATUS_OFFSET] = stat;

			if (drive->addressing == 1) {
				hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
				args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG);
				args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
				args->hobRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG);
				args->hobRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG);
				args->hobRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG);
			}
		}
	} else if (blk_pm_request(rq)) {
		struct request_pm_state *pm = rq->end_io_data;
#ifdef DEBUG_PM
		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
		       drive->name, rq->pm->pm_step, stat, err);
#endif
		ide_complete_power_step(drive, rq, stat, err);
		if (pm->pm_step == ide_pm_state_completed)
			ide_complete_pm_request(drive, rq);
		return;
	}

	spin_lock_irqsave(&ide_lock, flags);
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	rq->errors = err;
	end_that_request_last(rq, !rq->errors);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_end_drive_cmd);

/**
 * try_to_flush_leftover_data - flush junk
 * @drive: drive to flush
 *
 * try_to_flush_leftover_data() is invoked in response to a drive
 * unexpectedly having its DRQ_STAT bit set. As an alternative to
 * resetting the drive, this routine tries to clear the condition
 * by reading a sector's worth of data from the drive. Of course,
 * this may not help if the drive is *waiting* for data from *us*.
 */
static void try_to_flush_leftover_data (ide_drive_t *drive)
{
	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;

	if (drive->media != ide_disk)
		return;
	while (i > 0) {
		u32 buffer[16];
		u32 wcount = (i > 16) ? 16 : i;

		i -= wcount;
		HWIF(drive)->ata_input_data(drive, buffer, wcount);
	}
}

static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}

static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else if (stat & ERR_STAT) {
		/* err has different meaning on cdrom and tape */
		if (err == ABRT_ERR) {
			if (drive->select.b.lba &&
			    /* some newer drives don't support WIN_SPECIFY */
			    hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY)
				return ide_stopped;
		} else if ((err & BAD_CRC) == BAD_CRC) {
			/* UDMA crc error, just retry the operation */
			drive->crc_count++;
		} else if (err & (BBD_ERR | ECC_ERR)) {
			/* retries won't help these */
			rq->errors = ERROR_MAX;
		} else if (err & TRK0_ERR) {
			/* help it find track zero */
			rq->errors |= ERROR_RECAL;
		}
	}

	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ)
		try_to_flush_leftover_data(drive);

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		/* force an abort */
		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);

	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq))
		ide_kill_rq(drive, rq);
	else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
			drive->special.b.recalibrate = 1;
		++rq->errors;
	}
	return ide_stopped;
}

static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	ide_hwif_t *hwif = drive->hwif;

	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
		/* other bits are useless when BUSY */
		rq->errors |= ERROR_RESET;
	} else {
		/* add decoding error stuff */
	}

	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
		/* force an abort */
		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);

	if (rq->errors >= ERROR_MAX) {
		ide_kill_rq(drive, rq);
	} else {
		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
			++rq->errors;
			return ide_do_reset(drive);
		}
		++rq->errors;
	}

	return ide_stopped;
}

ide_startstop_t
__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
	if (drive->media == ide_disk)
		return ide_ata_error(drive, rq, stat, err);
	return ide_atapi_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(__ide_error);

/**
 * ide_error - handle an error on the IDE
 * @drive: drive the error occurred on
 * @msg: message to report
 * @stat: status bits
 *
 * ide_error() takes action based on the error returned by the drive.
 * For normal I/O that may well include retries. We deal with both
 * new-style (taskfile) and old-style command handling here. In the
 * case of taskfile command handling there is work left to do.
 */

ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
{
	struct request *rq;
	u8 err;

	err = ide_dump_status(drive, msg, stat);

	if ((rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, stat, err);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->error(drive, rq, stat, err);
	} else
		return __ide_error(drive, rq, stat, err);
}

EXPORT_SYMBOL_GPL(ide_error);

ide_startstop_t __ide_abort(ide_drive_t *drive, struct request *rq)
{
	if (drive->media != ide_disk)
		rq->errors |= ERROR_RESET;

	ide_kill_rq(drive, rq);

	return ide_stopped;
}

EXPORT_SYMBOL_GPL(__ide_abort);

/**
 * ide_abort - abort pending IDE operations
 * @drive: drive the error occurred on
 * @msg: message to report
 *
 * ide_abort kills and cleans up when we are about to do a
 * host-initiated reset on active commands. Longer term we
 * want handlers to have sensible abort handling themselves.
 *
 * This differs fundamentally from ide_error because in
 * this case the command is doing just fine when we
 * blow it away.
 */

ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
{
	struct request *rq;

	if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
		return ide_stopped;

	/* retry only "normal" I/O: */
	if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) {
		rq->errors = 1;
		ide_end_drive_cmd(drive, BUSY_STAT, 0);
		return ide_stopped;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->abort(drive, rq);
	} else
		return __ide_abort(drive, rq);
}

/**
 * ide_cmd - issue a simple drive command
 * @drive: drive the command is for
 * @cmd: command byte
 * @nsect: sector byte
 * @handler: handler for the command completion
 *
 * Issue a simple drive command with interrupts.
 * The drive must be selected beforehand.
 */

static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect,
		     ide_handler_t *handler)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);	/* clear nIEN */
	SELECT_MASK(drive, 0);
	hwif->OUTB(nsect, IDE_NSECTOR_REG);
	ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
}

/**
 * drive_cmd_intr - drive command completion interrupt
 * @drive: drive the completion interrupt occurred on
 *
 * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
 * We do any necessary data reading and then wait for the drive to
 * go non-busy. At that point we may read the error data and complete
 * the request.
 */

static ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	ide_hwif_t *hwif = HWIF(drive);
	u8 *args = (u8 *) rq->buffer;
	u8 stat = hwif->INB(IDE_STATUS_REG);
	int retries = 10;

	local_irq_enable();
	if ((stat & DRQ_STAT) && args && args[3]) {
		u8 io_32bit = drive->io_32bit;
		drive->io_32bit = 0;
		hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
		drive->io_32bit = io_32bit;
		while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
			udelay(100);
	}

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "drive_cmd", stat);
		/* calls ide_end_drive_cmd */
	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}

static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_SECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_LCYL_OFFSET] = drive->cyl;
	task->tfRegister[IDE_HCYL_OFFSET] = drive->cyl >> 8;
	task->tfRegister[IDE_SELECT_OFFSET] = ((drive->head - 1) | drive->select.all) & 0xBF;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY;

	task->handler = &set_geometry_intr;
}

static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE;

	task->handler = &recal_intr;
}

static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task)
{
	task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req;
	task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT;

	task->handler = &set_multmode_intr;
}

static ide_startstop_t ide_disk_special(ide_drive_t *drive)
{
	special_t *s = &drive->special;
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.command_type = IDE_DRIVE_TASK_NO_DATA;

	if (s->b.set_geometry) {
		s->b.set_geometry = 0;
		ide_init_specify_cmd(drive, &args);
	} else if (s->b.recalibrate) {
		s->b.recalibrate = 0;
		ide_init_restore_cmd(drive, &args);
	} else if (s->b.set_multmode) {
		s->b.set_multmode = 0;
		if (drive->mult_req > drive->id->max_multsect)
			drive->mult_req = drive->id->max_multsect;
		ide_init_setmult_cmd(drive, &args);
	} else if (s->all) {
		int special = s->all;
		s->all = 0;
		printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
		return ide_stopped;
	}

	do_rw_taskfile(drive, &args);

	return ide_started;
}

/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
 * commands to a drive. It used to do much more, but has been scaled
 * back.
 */

static ide_startstop_t do_special (ide_drive_t *drive)
{
	special_t *s = &drive->special;

#ifdef DEBUG
	printk("%s: do_special: 0x%02x\n", drive->name, s->all);
#endif
	if (s->b.set_tune) {
		s->b.set_tune = 0;
		if (HWIF(drive)->tuneproc != NULL)
			HWIF(drive)->tuneproc(drive, drive->tune_req);
		return ide_stopped;
	} else {
		if (drive->media == ide_disk)
			return ide_disk_special(drive);

		s->all = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}
}

void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (hwif->sg_mapped)	/* needed by ide-scsi */
		return;

	if ((rq->flags & REQ_DRIVE_TASKFILE) == 0) {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	} else {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	}
}

EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->nsect = hwif->nleft = rq->nr_sectors;
	hwif->cursg = hwif->cursg_ofs = 0;
}

EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 * execute_drive_cmd - issue special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Weirdly you can call it with NULL to wait for
 * all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
					  struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	if (rq->flags & REQ_DRIVE_TASKFILE) {
		ide_task_t *args = rq->special;

		if (!args)
			goto done;

		hwif->data_phase = args->data_phase;

		switch (hwif->data_phase) {
		case TASKFILE_MULTI_OUT:
		case TASKFILE_OUT:
		case TASKFILE_MULTI_IN:
		case TASKFILE_IN:
			ide_init_sg_cmd(drive, rq);
			ide_map_sg(drive, rq);
		default:
			break;
		}

		if (args->tf_out_flags.all != 0)
			return flagged_taskfile(drive, args);
		return do_rw_taskfile(drive, args);
	} else if (rq->flags & REQ_DRIVE_TASK) {
		u8 *args = rq->buffer;
		u8 sel;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_TASK_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("fr=0x%02x ", args[1]);
		printk("ns=0x%02x ", args[2]);
		printk("sc=0x%02x ", args[3]);
		printk("lcyl=0x%02x ", args[4]);
		printk("hcyl=0x%02x ", args[5]);
		printk("sel=0x%02x\n", args[6]);
#endif
		hwif->OUTB(args[1], IDE_FEATURE_REG);
		hwif->OUTB(args[3], IDE_SECTOR_REG);
		hwif->OUTB(args[4], IDE_LCYL_REG);
		hwif->OUTB(args[5], IDE_HCYL_REG);
		sel = (args[6] & ~0x10);
		if (drive->select.b.unit)
			sel |= 0x10;
		hwif->OUTB(sel, IDE_SELECT_REG);
		ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
		return ide_started;
	} else if (rq->flags & REQ_DRIVE_CMD) {
		u8 *args = rq->buffer;

		if (!args)
			goto done;
#ifdef DEBUG
		printk("%s: DRIVE_CMD ", drive->name);
		printk("cmd=0x%02x ", args[0]);
		printk("sc=0x%02x ", args[1]);
		printk("fr=0x%02x ", args[2]);
		printk("xx=0x%02x\n", args[3]);
#endif
		if (args[0] == WIN_SMART) {
			hwif->OUTB(0x4f, IDE_LCYL_REG);
			hwif->OUTB(0xc2, IDE_HCYL_REG);
			hwif->OUTB(args[2], IDE_FEATURE_REG);
			hwif->OUTB(args[1], IDE_SECTOR_REG);
			ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
			return ide_started;
		}
		hwif->OUTB(args[2], IDE_FEATURE_REG);
		ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
		return ide_started;
	}

done:
	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	ide_end_drive_cmd(drive,
			  hwif->INB(IDE_STATUS_REG),
			  hwif->INB(IDE_ERROR_REG));
	return ide_stopped;
}

static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->end_io_data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == ide_pm_state_start_suspend)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->blocked = 1;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == ide_pm_state_start_resume) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(HWIF(drive), 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		SELECT_DRIVE(drive);
		HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
		rc = ide_wait_not_busy(HWIF(drive), 10000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
	}
}

/**
 * start_request - start of I/O and command issuing for IDE
 * @drive: drive the request is for
 * @rq: request to start
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests. It also does
 * the final remapping for weird stuff like EZDrive. Once
 * device mapper can work at the sector level, the EZDrive stuff
 * can go away.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;
	sector_t block;

	BUG_ON(!(rq->flags & REQ_STARTED));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
	       HWIF(drive)->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		goto kill_rq;
	}

	block = rq->sector;
	if (blk_fs_request(rq) &&
	    (drive->media == ide_disk || drive->media == ide_floppy)) {
		block += drive->sect0;
	}
	/* Yecch - this will shift the entire interval,
	   possibly killing some innocent following sector */
	if (block == 0 && drive->remap_0_to_1 == 1)
		block = 1;  /* redirect MBR access to EZ-Drive partn table */

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	SELECT_DRIVE(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}
	if (!drive->special.all) {
		ide_driver_t *drv;

		if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK))
			return execute_drive_cmd(drive, rq);
		else if (rq->flags & REQ_DRIVE_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->end_io_data;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
			       drive->name, rq->pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == ide_pm_state_completed)
				ide_complete_pm_request(drive, rq);
			return startstop;
		}

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		return drv->do_request(drive, rq, block);
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the hwgroup by sleeping for timeout jiffies.
 */

void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->sleeping = 1;
}

EXPORT_SYMBOL(ide_stall_queue);
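
/*
 * Usage sketch (added for clarity, not part of the original file): a
 * subdriver that wants to yield the channel for roughly half a second
 * could do
 *
 *	ide_stall_queue(drive, HZ / 2);
 *
 * The timeout is clamped to WAIT_WORSTCASE above, and choose_drive()
 * below will skip the drive until drive->sleep has passed.
 */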

#define WAKEUP(drive)	((drive)->service_start + 2 * (drive)->service_time)
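
/*
 * Clarifying note (added): WAKEUP() estimates the earliest time a drive
 * "deserves" service again -- the moment its last service began plus
 * roughly twice the time that service took. choose_drive() below
 * compares these estimates so that a drive which recently consumed a
 * lot of service time waits longer before being picked again.
 */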

/**
 * choose_drive - select a drive to service
 * @hwgroup: hardware group to select on
 *
 * choose_drive() selects the next drive which will be serviced.
 * This is necessary because the IDE layer can't issue commands
 * to both drives on the same cable, unlike SCSI.
 */

static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
{
	ide_drive_t *drive, *best;

repeat:
	best = NULL;
	drive = hwgroup->drive;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(drive->queue)) {
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(drive->queue);
		return drive;
	}

	do {
		if ((!drive->sleeping || time_after_eq(jiffies, drive->sleep))
		    && !elv_queue_empty(drive->queue)) {
			if (!best
			    || (drive->sleeping && (!best->sleeping || time_before(drive->sleep, best->sleep)))
			    || (!best->sleeping && time_before(WAKEUP(drive), WAKEUP(best)))) {
				if (!blk_queue_plugged(drive->queue))
					best = drive;
			}
		}
	} while ((drive = drive->next) != hwgroup->drive);
	if (best && best->nice1 && !best->sleeping && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
		long t = (signed long)(WAKEUP(best) - jiffies);
		if (t >= WAIT_MIN_SLEEP) {
			/*
			 * We *may* have some time to spare, but first let's see if
			 * someone can potentially benefit from our nice mood today..
			 */
			drive = best->next;
			do {
				if (!drive->sleeping
				    && time_before(jiffies - best->service_time, WAKEUP(drive))
				    && time_before(WAKEUP(drive), jiffies + t)) {
					ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
					goto repeat;
				}
			} while ((drive = drive->next) != best);
		}
	}
	return best;
}

/*
 * Issue a new request to a drive from hwgroup
 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
 *
 * A hwgroup is a serialized group of IDE interfaces. Usually there is
 * exactly one hwif (interface) per hwgroup, but buggy controllers (e.g.
 * CMD640) may have both interfaces in a single hwgroup to "serialize"
 * access. Or possibly multiple ISA interfaces can share a common IRQ by
 * being grouped together into one hwgroup for serialized access.
 *
 * Note also that several hwgroups can end up sharing a single IRQ,
 * possibly along with many other devices. This is especially common in
 * PCI-based systems with off-board IDE controller cards.
 *
 * The IDE driver uses the single global ide_lock spinlock to protect
 * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
 * hwgroup->busy flag to indicate that this hwgroup is now active,
 * and then initiates processing of the top request from the request queue.
 *
 * Other threads attempting entry notice the busy setting, and will simply
 * queue their new requests and exit immediately. Note that hwgroup->busy
 * remains set even when the driver is merely awaiting the next interrupt.
 * Thus, the meaning is "this hwgroup is busy processing a request".
 *
 * When processing of a request completes, the completing thread or IRQ-handler
 * will start the next request from the queue. If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
 * The ide_lock (spinlock) is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver. This makes the driver much friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
 */
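/*
 * Caller-side sketch of the contract described above (added for
 * clarity): ide_do_request() expects ide_lock to be held with IRQs
 * disabled, so an out-of-line caller looks like
 *
 *	spin_lock_irqsave(&ide_lock, flags);
 *	ide_do_request(hwgroup, IDE_NO_IRQ);
 *	spin_unlock_irqrestore(&ide_lock, flags);
 *
 * which is exactly the pattern ide_do_drive_cmd() uses near the end of
 * this file; do_ide_request() relies on the block layer already holding
 * the queue lock (ide_lock) when it is called.
 */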
static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
{
	ide_drive_t *drive;
	ide_hwif_t *hwif;
	struct request *rq;
	ide_startstop_t startstop;
	int loops = 0;

	/* for atari only: POSSIBLY BROKEN HERE(?) */
	ide_get_lock(ide_intr, hwgroup);

	/* caller must own ide_lock */
	BUG_ON(!irqs_disabled());

	while (!hwgroup->busy) {
		hwgroup->busy = 1;
		drive = choose_drive(hwgroup);
		if (drive == NULL) {
			int sleeping = 0;
			unsigned long sleep = 0; /* shut up, gcc */
			hwgroup->rq = NULL;
			drive = hwgroup->drive;
			do {
				if (drive->sleeping && (!sleeping || time_before(drive->sleep, sleep))) {
					sleeping = 1;
					sleep = drive->sleep;
				}
			} while ((drive = drive->next) != hwgroup->drive);
			if (sleeping) {
				/*
				 * Take a short snooze, and then wake up this
				 * hwgroup again. This gives other hwgroups on
				 * the same IRQ a chance to play fairly with
				 * us, just in case there are big differences
				 * in relative throughputs.. don't want to hog
				 * the cpu too much.
				 */
				if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
					sleep = jiffies + WAIT_MIN_SLEEP;
#if 1
				if (timer_pending(&hwgroup->timer))
					printk(KERN_CRIT "ide_set_handler: timer already active\n");
#endif
				/* so that ide_timer_expiry knows what to do */
				hwgroup->sleeping = 1;
				mod_timer(&hwgroup->timer, sleep);
				/* we purposely leave hwgroup->busy==1
				 * while sleeping */
			} else {
				/* Ugly, but how can we sleep for the lock
				 * otherwise? perhaps from tq_disk?
				 */

				/* for atari only */
				ide_release_lock();
				hwgroup->busy = 0;
			}

			/* no more work for this hwgroup (for now) */
			return;
		}
	again:
		hwif = HWIF(drive);
		if (hwgroup->hwif->sharing_irq &&
		    hwif != hwgroup->hwif &&
		    hwif->io_ports[IDE_CONTROL_OFFSET]) {
			/* set nIEN for previous hwif */
			SELECT_INTERRUPT(drive);
		}
		hwgroup->hwif = hwif;
		hwgroup->drive = drive;
		drive->sleeping = 0;
		drive->service_start = jiffies;

		if (blk_queue_plugged(drive->queue)) {
			printk(KERN_ERR "ide: huh? queue was plugged!\n");
			break;
		}

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		rq = elv_next_request(drive->queue);
		if (!rq) {
			hwgroup->busy = 0;
			break;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the elv_next_request()
		 * above from returning whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * through, though. I hope that doesn't happen too much,
		 * hopefully not unless the subdriver triggers such a thing
		 * in its own PM state machine.
		 *
		 * We count how many times we loop here to make sure we service
		 * all drives in the hwgroup without looping for ever
		 */
		if (drive->blocked && !blk_pm_request(rq) && !(rq->flags & REQ_PREEMPT)) {
			drive = drive->next ? drive->next : hwgroup->drive;
			if (loops++ < 4 && !blk_queue_plugged(drive->queue))
				goto again;
			/* We clear busy, there should be no pending ATA command at this point. */
			hwgroup->busy = 0;
			break;
		}

		hwgroup->rq = rq;

		/*
		 * Some systems have trouble with IDE IRQs arriving while
		 * the driver is still setting things up. So, here we disable
		 * the IRQ used by this interface while the request is being started.
		 * This may look bad at first, but pretty much the same thing
		 * happens anyway when any interrupt comes in, IDE or otherwise
		 *  -- the kernel masks the IRQ while it is being handled.
		 */
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			disable_irq_nosync(hwif->irq);
		spin_unlock(&ide_lock);
		local_irq_enable();
		/* allow other IRQs while we start this request */
		startstop = start_request(drive, rq);
		spin_lock_irq(&ide_lock);
		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
			enable_irq(hwif->irq);
		if (startstop == ide_stopped)
			hwgroup->busy = 0;
	}
}

/*
 * Passes the stuff to ide_do_request(). This is the block layer
 * request_fn for an IDE queue, so it is entered with the queue lock
 * (ide_lock) already held.
 */
void do_ide_request(request_queue_t *q)
{
	ide_drive_t *drive = q->queuedata;

	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
}

/*
 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)HWIF(drive)->ide_dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				hwif->INB(IDE_STATUS_REG));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		(void) hwif->ide_dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->retry_pio++;
	drive->state = DMA_PIO_RETRY;
	(void) hwif->ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc (hwgroup->busy is cleared on return) and
	 * make sure request is sane
	 */
	rq = HWGROUP(drive)->rq;
	HWGROUP(drive)->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}

/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @data: timer callback magic (hwgroup)
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current
 * mess. If the current handler includes an expiry handler then
 * we invoke the expiry handler, and providing it is happy the
 * work is done. If that fails we apply generic recovery rules
 * invoking the handler and checking the drive DMA status. We
 * have an excessively incestuous relationship with the DMA
 * logic that wants cleaning up.
 */

void ide_timer_expiry (unsigned long data)
{
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
	ide_handler_t *handler;
	ide_expiry_t *expiry;
	unsigned long flags;
	unsigned long wait = -1;

	spin_lock_irqsave(&ide_lock, flags);

	if ((handler = hwgroup->handler) == NULL) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
		if (hwgroup->sleeping) {
			hwgroup->sleeping = 0;
			hwgroup->busy = 0;
		}
	} else {
		ide_drive_t *drive = hwgroup->drive;
		if (!drive) {
			printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
			hwgroup->handler = NULL;
		} else {
			ide_hwif_t *hwif;
			ide_startstop_t startstop = ide_stopped;
			if (!hwgroup->busy) {
				hwgroup->busy = 1;	/* paranoia */
				printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
			}
			if ((expiry = hwgroup->expiry) != NULL) {
				/* continue */
				if ((wait = expiry(drive)) > 0) {
					/* reset timer */
					hwgroup->timer.expires = jiffies + wait;
					add_timer(&hwgroup->timer);
					spin_unlock_irqrestore(&ide_lock, flags);
					return;
				}
			}
			hwgroup->handler = NULL;
			/*
			 * We need to simulate a real interrupt when invoking
			 * the handler() function, which means we need to
			 * globally mask the specific IRQ:
			 */
			spin_unlock(&ide_lock);
			hwif = HWIF(drive);
#if DISABLE_IRQ_NOSYNC
			disable_irq_nosync(hwif->irq);
#else
			/* disable_irq_nosync ?? */
			disable_irq(hwif->irq);
#endif /* DISABLE_IRQ_NOSYNC */
			/* local CPU only,
			 * as if we were handling an interrupt */
			local_irq_disable();
			if (hwgroup->polling) {
				startstop = handler(drive);
			} else if (drive_is_ready(drive)) {
				if (drive->waiting_for_dma)
					(void) hwgroup->hwif->ide_dma_lostirq(drive);
				(void)ide_ack_intr(hwif);
				printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
				startstop = handler(drive);
			} else {
				if (drive->waiting_for_dma) {
					startstop = ide_dma_timeout_retry(drive, wait);
				} else
					startstop =
					ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
			}
			drive->service_time = jiffies - drive->service_start;
			spin_lock_irq(&ide_lock);
			enable_irq(hwif->irq);
			if (startstop == ide_stopped)
				hwgroup->busy = 0;
		}
	}
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}

/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwgroup: hwgroup being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes __cli() is in effect when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 *
 * Note that we must walk the entire hwgroup here. We know which hwif
 * is doing the current command, but we don't know which hwif burped
 * mysteriously.
 */

static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
{
	u8 stat;
	ide_hwif_t *hwif = hwgroup->hwif;

	/*
	 * handle the unexpected interrupt
	 */
	do {
		if (hwif->irq == irq) {
			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
			if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
				/* Try to not flood the console with msgs */
				static unsigned long last_msgtime, count;
				++count;
				if (time_after(jiffies, last_msgtime + HZ)) {
					last_msgtime = jiffies;
					printk(KERN_ERR "%s%s: unexpected interrupt, "
						"status=0x%02x, count=%ld\n",
						hwif->name,
						(hwif->next == hwgroup->hwif) ? "" : "(?)", stat, count);
				}
			}
		}
	} while ((hwif = hwif->next) != hwgroup->hwif);
}

/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif group
 * @regs: unused weirdness from the kernel irq layer
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do, be aware it is subtle in
 * places.
 *
 * hwgroup->hwif is the interface in the group currently performing
 * a command. hwgroup->drive is the drive and hwgroup->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the hwgroup and the process begins again.
 */

irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
	ide_hwif_t *hwif;
	ide_drive_t *drive;
	ide_handler_t *handler;
	ide_startstop_t startstop;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = hwgroup->hwif;

	if (!ide_ack_intr(hwif)) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}

	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 *
		 * FIXME: unexpected_intr should be hwif-> then we can
		 * remove all the ifdef PCI crap
		 */
#ifdef CONFIG_BLK_DEV_IDEPCI
		if (hwif->pci_dev && !hwif->pci_dev->vendor)
#endif	/* CONFIG_BLK_DEV_IDEPCI */
		{
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwgroup);
#ifdef CONFIG_BLK_DEV_IDEPCI
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
#endif /* CONFIG_BLK_DEV_IDEPCI */
		}
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	drive = hwgroup->drive;
	if (!drive) {
		/*
		 * This should NEVER happen, and there isn't much
		 * we could do about it here.
		 *
		 * [Note - this can occur if the drive is hot unplugged]
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_HANDLED;
	}
	if (!drive_is_ready(drive)) {
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		spin_unlock_irqrestore(&ide_lock, flags);
		return IRQ_NONE;
	}
	if (!hwgroup->busy) {
		hwgroup->busy = 1;	/* paranoia */
		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
	}
	hwgroup->handler = NULL;
	del_timer(&hwgroup->timer);
	spin_unlock(&ide_lock);

	if (drive->unmask)
		local_irq_enable();
	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);
	spin_lock_irq(&ide_lock);

	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	drive->service_time = jiffies - drive->service_start;
	if (startstop == ide_stopped) {
		if (hwgroup->handler == NULL) {	/* paranoia */
			hwgroup->busy = 0;
			ide_do_request(hwgroup, hwif->irq);
		} else {
			printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
				"on exit\n", drive->name);
		}
	}
	spin_unlock_irqrestore(&ide_lock, flags);
	return IRQ_HANDLED;
}

/**
 * ide_init_drive_cmd - initialize a drive command request
 * @rq: request object
 *
 * Initialize a request before we fill it in and send it down to
 * ide_do_drive_cmd. Commands must be set up by this function. Right
 * now it doesn't do a lot, but if that changes abusers will have a
 * nasty surprise.
 */

void ide_init_drive_cmd (struct request *rq)
{
	memset(rq, 0, sizeof(*rq));
	rq->flags = REQ_DRIVE_CMD;
	rq->ref_count = 1;
}

EXPORT_SYMBOL(ide_init_drive_cmd);

/**
 * ide_do_drive_cmd - issue IDE special command
 * @drive: device to issue command
 * @rq: request to issue
 * @action: action for processing
 *
 * This function issues a special IDE device request
 * onto the request queue.
 *
 * If action is ide_wait, then the rq is queued at the end of the
 * request queue, and the function sleeps until it has been processed.
 * This is for use when invoked from an ioctl handler.
 *
 * If action is ide_preempt, then the rq is queued at the head of
 * the request queue, displacing the currently-being-processed
 * request and this function returns immediately without waiting
 * for the new rq to be completed. This is VERY DANGEROUS, and is
 * intended for careful use by the ATAPI tape/cdrom driver code.
 *
 * If action is ide_end, then the rq is queued at the end of the
 * request queue, and the function returns immediately without waiting
 * for the new rq to be completed. This is again intended for careful
 * use by the ATAPI tape/cdrom driver code.
 */

int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	DECLARE_COMPLETION(wait);
	int where = ELEVATOR_INSERT_BACK, err;
	int must_wait = (action == ide_wait || action == ide_head_wait);

	rq->errors = 0;
	rq->rq_status = RQ_ACTIVE;

	/*
	 * we need to hold an extra reference to request for safe inspection
	 * after completion
	 */
	if (must_wait) {
		rq->ref_count++;
		rq->waiting = &wait;
		rq->end_io = blk_end_sync_rq;
	}

	spin_lock_irqsave(&ide_lock, flags);
	if (action == ide_preempt)
		hwgroup->rq = NULL;
	if (action == ide_preempt || action == ide_head_wait) {
		where = ELEVATOR_INSERT_FRONT;
		rq->flags |= REQ_PREEMPT;
	}
	__elv_add_request(drive->queue, rq, where, 0);
	ide_do_request(hwgroup, IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);

	err = 0;
	if (must_wait) {
		wait_for_completion(&wait);
		rq->waiting = NULL;
		if (rq->errors)
			err = -EIO;

		blk_put_request(rq);
	}

	return err;
}

EXPORT_SYMBOL(ide_do_drive_cmd);
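
/*
 * Usage sketch (added for clarity, not part of the original file):
 * issuing a simple REQ_DRIVE_CMD from process context, roughly the way
 * an ioctl handler would. The function name and the choice of command
 * are hypothetical; the buffer layout follows the convention used by
 * execute_drive_cmd() and ide_end_drive_cmd() above:
 * args[0] = command, args[1] = nsector, args[2] = feature,
 * args[3] = number of sectors of data to read back, if any.
 */
#if 0
static int example_issue_idle_immediate(ide_drive_t *drive)
{
	struct request rq;
	u8 args[4] = { WIN_IDLEIMMEDIATE, 0, 0, 0 };

	ide_init_drive_cmd(&rq);
	rq.buffer = (char *)args;

	/* queue at the tail and sleep until the command completes */
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
#endif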