X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=drivers%2Fide%2Fide-io.c;h=a4aa4bf847382ddcfc30ffe06cf5a02d69e3054c;hb=f974b196f58fe042c7b2b4c0ee15d5a6112dbf40;hp=e85060164203f9468e6d02c6bc8011ff298e821b;hpb=d15a613ba01ff2b209ecad7a38ccbb23b3b06c92;p=mirror_ubuntu-kernels.git

diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index e85060164203..a4aa4bf84738 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -40,7 +40,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -55,25 +54,9 @@
 #include
 #include
 
-static int __ide_end_request(ide_drive_t *drive, struct request *rq,
-			     int uptodate, unsigned int nr_bytes, int dequeue)
+int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
+	       unsigned int nr_bytes)
 {
-	int ret = 1;
-	int error = 0;
-
-	if (uptodate <= 0)
-		error = uptodate ? uptodate : -EIO;
-
-	/*
-	 * if failfast is set on a request, override number of sectors and
-	 * complete the whole request right now
-	 */
-	if (blk_noretry_request(rq) && error)
-		nr_bytes = rq->hard_nr_sectors << 9;
-
-	if (!blk_fs_request(rq) && error && !rq->errors)
-		rq->errors = -EIO;
-
 	/*
 	 * decide whether to reenable DMA -- 3 is a random magic for now,
 	 * if we DMA timeout more than 3 times, just stay in PIO
@@ -84,14 +67,9 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 		ide_dma_on(drive);
 	}
 
-	if (!blk_end_request(rq, error, nr_bytes))
-		ret = 0;
-
-	if (ret == 0 && dequeue)
-		drive->hwif->rq = NULL;
-
-	return ret;
+	return blk_end_request(rq, error, nr_bytes);
 }
+EXPORT_SYMBOL_GPL(ide_end_rq);
 
 /**
  * ide_end_request - complete an IDE I/O
@@ -108,6 +86,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 {
 	unsigned int nr_bytes = nr_sectors << 9;
 	struct request *rq = drive->hwif->rq;
+	int rc, error = 0;
 
 	if (!nr_bytes) {
 		if (blk_pc_request(rq))
@@ -116,95 +95,83 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 			nr_bytes = rq->hard_cur_sectors << 9;
 	}
 
-	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
+	/*
+	 * if failfast is set on a request, override number of sectors
+	 * and complete the whole request right now
+	 */
+	if (blk_noretry_request(rq) && uptodate <= 0)
+		nr_bytes = rq->hard_nr_sectors << 9;
+
+	if (uptodate <= 0)
+		error = uptodate ? uptodate : -EIO;
+
+	rc = ide_end_rq(drive, rq, error, nr_bytes);
+	if (rc == 0)
+		drive->hwif->rq = NULL;
+
+	return rc;
 }
 EXPORT_SYMBOL(ide_end_request);
 
-/**
- * ide_end_dequeued_request - complete an IDE I/O
- * @drive: IDE device for the I/O
- * @uptodate:
- * @nr_sectors: number of sectors completed
- *
- * Complete an I/O that is no longer on the request queue. This
- * typically occurs when we pull the request and issue a REQUEST_SENSE.
- * We must still finish the old request but we must not tamper with the
- * queue in the meantime.
- *
- * NOTE: This path does not handle barrier, but barrier is not supported
- * on ide-cd anyway.
- */
-
-int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
-			     int uptodate, int nr_sectors)
+void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
 {
-	BUG_ON(!blk_rq_started(rq));
+	struct ide_taskfile *tf = &cmd->tf;
+	struct request *rq = cmd->rq;
+	u8 tf_cmd = tf->command;
+
+	tf->error = err;
+	tf->status = stat;
+
+	drive->hwif->tp_ops->tf_read(drive, cmd);
+
+	if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
+	    tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
+		if (tf->lbal != 0xc4) {
+			printk(KERN_ERR "%s: head unload failed!\n",
+			       drive->name);
+			ide_tf_dump(drive->name, tf);
+		} else
+			drive->dev_flags |= IDE_DFLAG_PARKED;
+	}
 
-	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
+	if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+		memcpy(rq->special, cmd, sizeof(*cmd));
+
+	if (cmd->tf_flags & IDE_TFLAG_DYN)
+		kfree(cmd);
 }
-EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
 
-/**
- * ide_end_drive_cmd - end an explicit drive command
- * @drive: command
- * @stat: status bits
- * @err: error bits
- *
- * Clean up after success/failure of an explicit drive command.
- * These get thrown onto the queue so they are synchronized with
- * real I/O operations on the drive.
- *
- * In LBA48 mode we have to read the register set twice to get
- * all the extra information out.
- */
-
-void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
+int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq = hwif->rq;
+	int rc;
 
-	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
-		ide_task_t *task = (ide_task_t *)rq->special;
-
-		if (task) {
-			struct ide_taskfile *tf = &task->tf;
-
-			tf->error = err;
-			tf->status = stat;
-
-			drive->hwif->tp_ops->tf_read(drive, task);
-
-			if (task->tf_flags & IDE_TFLAG_DYN)
-				kfree(task);
-		}
-	} else if (blk_pm_request(rq)) {
-		struct request_pm_state *pm = rq->data;
-
-		ide_complete_power_step(drive, rq);
-		if (pm->pm_step == IDE_PM_COMPLETED)
-			ide_complete_pm_request(drive, rq);
-		return;
-	}
-
-	hwif->rq = NULL;
-
-	rq->errors = err;
+	rc = blk_end_request(rq, error, nr_bytes);
+	if (rc == 0)
+		hwif->rq = NULL;
 
-	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
-				     blk_rq_bytes(rq))))
-		BUG();
+	return rc;
 }
-EXPORT_SYMBOL(ide_end_drive_cmd);
+EXPORT_SYMBOL(ide_complete_rq);
 
 void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 {
-	if (rq->rq_disk) {
-		struct ide_driver *drv;
+	u8 drv_req = blk_special_request(rq) && rq->rq_disk;
+	u8 media = drive->media;
 
-		drv = *(struct ide_driver **)rq->rq_disk->private_data;
-		drv->end_request(drive, 0, 0);
-	} else
+	drive->failed_pc = NULL;
+
+	if ((media == ide_floppy || media == ide_tape) && drv_req) {
+		rq->errors = 0;
+		ide_complete_rq(drive, 0, blk_rq_bytes(rq));
+	} else {
+		if (media == ide_tape)
+			rq->errors = IDE_DRV_ERROR_GENERAL;
+		else if (blk_fs_request(rq) == 0 && rq->errors == 0)
+			rq->errors = -EIO;
 		ide_end_request(drive, 0, 0);
+	}
 }
 
 static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
@@ -232,20 +199,20 @@ static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
 static ide_startstop_t ide_disk_special(ide_drive_t *drive)
 {
 	special_t *s = &drive->special;
-	ide_task_t args;
+	struct ide_cmd cmd;
 
-	memset(&args, 0, sizeof(ide_task_t));
-	args.data_phase = TASKFILE_NO_DATA;
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.protocol = ATA_PROT_NODATA;
 
 	if (s->b.set_geometry) {
 		s->b.set_geometry = 0;
-		ide_tf_set_specify_cmd(drive, &args.tf);
+		ide_tf_set_specify_cmd(drive, &cmd.tf);
 	} else if (s->b.recalibrate) {
 		s->b.recalibrate = 0;
-		ide_tf_set_restore_cmd(drive, &args.tf);
+		ide_tf_set_restore_cmd(drive, &cmd.tf);
 	} else if (s->b.set_multmode) {
 		s->b.set_multmode = 0;
-		ide_tf_set_setmult_cmd(drive, &args.tf);
+		ide_tf_set_setmult_cmd(drive, &cmd.tf);
 	} else if (s->all) {
 		int special = s->all;
 		s->all = 0;
@@ -253,10 +220,10 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive)
 		return ide_stopped;
 	}
 
-	args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
-			IDE_TFLAG_CUSTOM_HANDLER;
+	cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
+		       IDE_TFLAG_CUSTOM_HANDLER;
 
-	do_rw_taskfile(drive, &args);
+	do_rw_taskfile(drive, &cmd);
 
 	return ide_started;
 }
@@ -289,30 +256,26 @@ static ide_startstop_t do_special (ide_drive_t *drive)
 void ide_map_sg(ide_drive_t *drive, struct request *rq)
 {
 	ide_hwif_t *hwif = drive->hwif;
+	struct ide_cmd *cmd = &hwif->cmd;
 	struct scatterlist *sg = hwif->sg_table;
 
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
-		hwif->sg_nents = 1;
+		cmd->sg_nents = 1;
 	} else if (!rq->bio) {
 		sg_init_one(sg, rq->data, rq->data_len);
-		hwif->sg_nents = 1;
-	} else {
-		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
-	}
+		cmd->sg_nents = 1;
+	} else
+		cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
 }
-
 EXPORT_SYMBOL_GPL(ide_map_sg);
 
-void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
+void ide_init_sg_cmd(struct ide_cmd *cmd, int nsect)
 {
-	ide_hwif_t *hwif = drive->hwif;
-
-	hwif->nsect = hwif->nleft = rq->nr_sectors;
-	hwif->cursg_ofs = 0;
-	hwif->cursg = NULL;
+	cmd->nsect = cmd->nleft = nsect;
+	cmd->cursg_ofs = 0;
+	cmd->cursg = NULL;
 }
-
 EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
 
 /**
@@ -330,24 +293,15 @@ EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
 static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 		struct request *rq)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	ide_task_t *task = rq->special;
-
-	if (task) {
-		hwif->data_phase = task->data_phase;
+	struct ide_cmd *cmd = rq->special;
 
-		switch (hwif->data_phase) {
-		case TASKFILE_MULTI_OUT:
-		case TASKFILE_OUT:
-		case TASKFILE_MULTI_IN:
-		case TASKFILE_IN:
-			ide_init_sg_cmd(drive, rq);
+	if (cmd) {
+		if (cmd->protocol == ATA_PROT_PIO) {
+			ide_init_sg_cmd(cmd, rq->nr_sectors);
 			ide_map_sg(drive, rq);
-		default:
-			break;
 		}
 
-		return do_rw_taskfile(drive, task);
+		return do_rw_taskfile(drive, cmd);
 	}
 
 	/*
@@ -357,8 +311,8 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 #ifdef DEBUG
 	printk("%s: DRIVE_CMD (null)\n", drive->name);
 #endif
-	ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
-			  ide_read_error(drive));
+	rq->errors = 0;
+	ide_complete_rq(drive, 0, blk_rq_bytes(rq));
 
 	return ide_stopped;
 }
@@ -376,9 +330,7 @@ static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
 	case REQ_DRIVE_RESET:
 		return ide_do_reset(drive);
 	default:
-		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
-		ide_end_request(drive, 0, 0);
-		return ide_stopped;
+		BUG();
 	}
 }
 
@@ -438,7 +390,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 		startstop = ide_start_power_step(drive, rq);
 		if (startstop == ide_stopped &&
 		    pm->pm_step == IDE_PM_COMPLETED)
-			ide_complete_pm_request(drive, rq);
+			ide_complete_pm_rq(drive, rq);
 		return startstop;
 	} else if (!rq->rq_disk && blk_special_request(rq))
 		/*
@@ -501,8 +453,8 @@ static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
 	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
 		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
 		if (rc == 0) {
-			/* for atari only */
-			ide_get_lock(ide_intr, hwif);
+			if (host->get_lock)
+				host->get_lock(ide_intr, hwif);
 		}
 	}
 	return rc;
@@ -511,8 +463,8 @@ static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
 static inline void ide_unlock_host(struct ide_host *host)
 {
 	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
-		/* for atari only */
-		ide_release_lock();
+		if (host->release_lock)
+			host->release_lock();
 		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
 	}
 }
@@ -841,6 +793,7 @@ static void unexpected_intr(int irq, ide_hwif_t *hwif)
 irqreturn_t ide_intr (int irq, void *dev_id)
 {
 	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
+	struct ide_host *host = hwif->host;
 	ide_drive_t *uninitialized_var(drive);
 	ide_handler_t *handler;
 	unsigned long flags;
@@ -848,8 +801,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	irqreturn_t irq_ret = IRQ_NONE;
 	int plug_device = 0;
 
-	if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE) {
-		if (hwif != hwif->host->cur_port)
+	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
+		if (hwif != host->cur_port)
 			goto out_early;
 	}
 
@@ -872,27 +825,19 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		 *
 		 * For PCI, we cannot tell the difference,
 		 * so in that case we just ignore it and hope it goes away.
-		 *
-		 * FIXME: unexpected_intr should be hwif-> then we can
-		 * remove all the ifdef PCI crap
 		 */
-#ifdef CONFIG_BLK_DEV_IDEPCI
-		if (hwif->chipset != ide_pci)
-#endif /* CONFIG_BLK_DEV_IDEPCI */
-		{
+		if ((host->irq_flags & IRQF_SHARED) == 0) {
 			/*
 			 * Probably not a shared PCI interrupt,
 			 * so we can safely try to do something about it:
 			 */
			unexpected_intr(irq, hwif);
-#ifdef CONFIG_BLK_DEV_IDEPCI
 		} else {
 			/*
 			 * Whack the status register, just in case
 			 * we have a leftover pending IRQ.
 			 */
 			(void)hwif->tp_ops->read_status(hwif);
-#endif /* CONFIG_BLK_DEV_IDEPCI */
 		}
 		goto out;
 	}
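
Editor's note (not part of the patch above): the hunks replace the old __ide_end_request()/ide_end_drive_cmd() paths with three helpers -- ide_end_rq(), a thin wrapper around blk_end_request(); ide_complete_cmd(), which reads back the taskfile and records status/error in struct ide_cmd; and ide_complete_rq(), which ends hwif->rq and clears it once nothing remains. Below is a minimal sketch of how a completion handler could chain the two ide_complete_*() helpers. It assumes only the signatures visible in this diff; the handler name task_done() is hypothetical and not introduced by the patch.

/*
 * Illustrative sketch only -- not part of the patch.  Assumes the
 * 2.6.29-era interfaces shown above (struct ide_cmd, blk_rq_bytes(),
 * hwif->rq); the helper name task_done() is hypothetical.
 */
static void task_done(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
{
	struct request *rq = drive->hwif->rq;

	/* Read back the taskfile and record status/error in the command. */
	ide_complete_cmd(drive, cmd, stat, err);

	/*
	 * End the whole request: ide_complete_rq() calls blk_end_request()
	 * and clears hwif->rq once no bytes are left to complete.
	 */
	ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
}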