]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/ata/libata-core.c
libata: rearrange dmesg info to add full ATA revision
[mirror_ubuntu-bionic-kernel.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
#define DRV_VERSION	"2.10"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
/* Forward declarations for device setup helpers defined later in this file. */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_unique_id = 1;	/* next port id to hand out */
static struct workqueue_struct *ata_wq;	/* workqueue for port tasks */

/* auxiliary workqueue; NOTE(review): consumers are elsewhere in libata */
struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
100
0baab86b 101
1da177e4
LT
102/**
103 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
104 * @tf: Taskfile to convert
105 * @fis: Buffer into which data will output
106 * @pmp: Port multiplier port
107 *
108 * Converts a standard ATA taskfile to a Serial ATA
109 * FIS structure (Register - Host to Device).
110 *
111 * LOCKING:
112 * Inherited from caller.
113 */
114
057ace5e 115void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
116{
117 fis[0] = 0x27; /* Register - Host to Device FIS */
118 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
119 bit 7 indicates Command FIS */
120 fis[2] = tf->command;
121 fis[3] = tf->feature;
122
123 fis[4] = tf->lbal;
124 fis[5] = tf->lbam;
125 fis[6] = tf->lbah;
126 fis[7] = tf->device;
127
128 fis[8] = tf->hob_lbal;
129 fis[9] = tf->hob_lbam;
130 fis[10] = tf->hob_lbah;
131 fis[11] = tf->hob_feature;
132
133 fis[12] = tf->nsect;
134 fis[13] = tf->hob_nsect;
135 fis[14] = 0;
136 fis[15] = tf->ctl;
137
138 fis[16] = 0;
139 fis[17] = 0;
140 fis[18] = 0;
141 fis[19] = 0;
142}
143
144/**
145 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
146 * @fis: Buffer from which data will be input
147 * @tf: Taskfile to output
148 *
e12a1be6 149 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
150 *
151 * LOCKING:
152 * Inherited from caller.
153 */
154
057ace5e 155void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
156{
157 tf->command = fis[2]; /* status */
158 tf->feature = fis[3]; /* error */
159
160 tf->lbal = fis[4];
161 tf->lbam = fis[5];
162 tf->lbah = fis[6];
163 tf->device = fis[7];
164
165 tf->hob_lbal = fis[8];
166 tf->hob_lbam = fis[9];
167 tf->hob_lbah = fis[10];
168
169 tf->nsect = fis[12];
170 tf->hob_nsect = fis[13];
171}
172
/* Command lookup table used by ata_rwcmd_protocol().  Indexed by a
 * class base (0 = PIO multi, 8 = PIO single, 16 = DMA) plus offsets
 * +4 for FUA, +2 for LBA48 and +1 for write.  A zero entry marks an
 * unsupported combination (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
202
203/**
8cbd6df1 204 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
205 * @tf: command to examine and configure
206 * @dev: device tf belongs to
1da177e4 207 *
2e9edbf8 208 * Examine the device configuration and tf->flags to calculate
8cbd6df1 209 * the proper read/write commands and protocol to use.
1da177e4
LT
210 *
211 * LOCKING:
212 * caller.
213 */
bd056d7e 214static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 215{
9a3dccc4 216 u8 cmd;
1da177e4 217
9a3dccc4 218 int index, fua, lba48, write;
2e9edbf8 219
9a3dccc4 220 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
221 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
222 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 223
8cbd6df1
AL
224 if (dev->flags & ATA_DFLAG_PIO) {
225 tf->protocol = ATA_PROT_PIO;
9a3dccc4 226 index = dev->multi_count ? 0 : 8;
bd056d7e 227 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
228 /* Unable to use DMA due to host limitation */
229 tf->protocol = ATA_PROT_PIO;
0565c26d 230 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
231 } else {
232 tf->protocol = ATA_PROT_DMA;
9a3dccc4 233 index = 16;
8cbd6df1 234 }
1da177e4 235
9a3dccc4
TH
236 cmd = ata_rw_cmds[index + fua + lba48 + write];
237 if (cmd) {
238 tf->command = cmd;
239 return 0;
240 }
241 return -1;
1da177e4
LT
242}
243
35b649fe
TH
244/**
245 * ata_tf_read_block - Read block address from ATA taskfile
246 * @tf: ATA taskfile of interest
247 * @dev: ATA device @tf belongs to
248 *
249 * LOCKING:
250 * None.
251 *
252 * Read block address from @tf. This function can handle all
253 * three address formats - LBA, LBA48 and CHS. tf->protocol and
254 * flags select the address format to use.
255 *
256 * RETURNS:
257 * Block address read from @tf.
258 */
259u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
260{
261 u64 block = 0;
262
263 if (tf->flags & ATA_TFLAG_LBA) {
264 if (tf->flags & ATA_TFLAG_LBA48) {
265 block |= (u64)tf->hob_lbah << 40;
266 block |= (u64)tf->hob_lbam << 32;
267 block |= tf->hob_lbal << 24;
268 } else
269 block |= (tf->device & 0xf) << 24;
270
271 block |= tf->lbah << 16;
272 block |= tf->lbam << 8;
273 block |= tf->lbal;
274 } else {
275 u32 cyl, head, sect;
276
277 cyl = tf->lbam | (tf->lbah << 8);
278 head = tf->device & 0xf;
279 sect = tf->lbal;
280
281 block = (cyl * dev->heads + head) * dev->sectors + sect;
282 }
283
284 return block;
285}
286
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* NCQ path: device must have NCQ enabled and not be forced to
	 * PIO; internal-tagged commands never go out as NCQ.
	 */
	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
	    likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* FPDMA commands carry the tag in nsect (shifted into
		 * bits 7:3) and the sector count in the feature regs.
		 */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		/* bit 6: presumably the LBA bit (matches ATA_LBA usage
		 * below) - verify against ATA spec */
		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;	/* FUA */
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
411
cb95d562
TH
412/**
413 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
414 * @pio_mask: pio_mask
415 * @mwdma_mask: mwdma_mask
416 * @udma_mask: udma_mask
417 *
418 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
419 * unsigned int xfer_mask.
420 *
421 * LOCKING:
422 * None.
423 *
424 * RETURNS:
425 * Packed xfer_mask.
426 */
427static unsigned int ata_pack_xfermask(unsigned int pio_mask,
428 unsigned int mwdma_mask,
429 unsigned int udma_mask)
430{
431 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
432 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
433 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
434}
435
c0489e4e
TH
436/**
437 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
438 * @xfer_mask: xfer_mask to unpack
439 * @pio_mask: resulting pio_mask
440 * @mwdma_mask: resulting mwdma_mask
441 * @udma_mask: resulting udma_mask
442 *
443 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
444 * Any NULL distination masks will be ignored.
445 */
446static void ata_unpack_xfermask(unsigned int xfer_mask,
447 unsigned int *pio_mask,
448 unsigned int *mwdma_mask,
449 unsigned int *udma_mask)
450{
451 if (pio_mask)
452 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
453 if (mwdma_mask)
454 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
455 if (udma_mask)
456 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
457}
458
/* Maps each transfer-mode class (PIO/MWDMA/UDMA) from its bit range in
 * an xfer_mask to its lowest XFER_* code.  Terminated by shift == -1.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and width within xfer_mask */
	u8 base;		/* lowest XFER_* code of this class */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
468
469/**
470 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
471 * @xfer_mask: xfer_mask of interest
472 *
473 * Return matching XFER_* value for @xfer_mask. Only the highest
474 * bit of @xfer_mask is considered.
475 *
476 * LOCKING:
477 * None.
478 *
479 * RETURNS:
480 * Matching XFER_* value, 0 if no match found.
481 */
482static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
483{
484 int highbit = fls(xfer_mask) - 1;
485 const struct ata_xfer_ent *ent;
486
487 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
488 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
489 return ent->base + highbit - ent->shift;
490 return 0;
491}
492
493/**
494 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
495 * @xfer_mode: XFER_* of interest
496 *
497 * Return matching xfer_mask for @xfer_mode.
498 *
499 * LOCKING:
500 * None.
501 *
502 * RETURNS:
503 * Matching xfer_mask, 0 if no match found.
504 */
505static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
506{
507 const struct ata_xfer_ent *ent;
508
509 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
510 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
511 return 1 << (ent->shift + xfer_mode - ent->base);
512 return 0;
513}
514
515/**
516 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
517 * @xfer_mode: XFER_* of interest
518 *
519 * Return matching xfer_shift for @xfer_mode.
520 *
521 * LOCKING:
522 * None.
523 *
524 * RETURNS:
525 * Matching xfer_shift, -1 if no match found.
526 */
527static int ata_xfer_mode2shift(unsigned int xfer_mode)
528{
529 const struct ata_xfer_ent *ent;
530
531 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
532 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
533 return ent->shift;
534 return -1;
535}
536
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @modemask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	/* highbit < 0 means the mask was empty */
	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
582
/* Return a human-readable name for SATA link speed @spd (1-based);
 * "<unknown>" for zero or out-of-range values.
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}
594
3373efd8 595void ata_dev_disable(struct ata_device *dev)
0b8efb0a 596{
0dd4b21f 597 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
f15a1daf 598 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
0b8efb0a
TH
599 dev->class++;
600 }
601}
602
1da177e4 603/**
0d5ff566 604 * ata_devchk - PATA device presence detection
1da177e4
LT
605 * @ap: ATA channel to examine
606 * @device: Device to examine (starting at zero)
607 *
608 * This technique was originally described in
609 * Hale Landis's ATADRVR (www.ata-atapi.com), and
610 * later found its way into the ATA/ATAPI spec.
611 *
612 * Write a pattern to the ATA shadow registers,
613 * and if a device is present, it will respond by
614 * correctly storing and echoing back the
615 * ATA shadow register contents.
616 *
617 * LOCKING:
618 * caller.
619 */
620
0d5ff566 621static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
622{
623 struct ata_ioports *ioaddr = &ap->ioaddr;
624 u8 nsect, lbal;
625
626 ap->ops->dev_select(ap, device);
627
0d5ff566
TH
628 iowrite8(0x55, ioaddr->nsect_addr);
629 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 630
0d5ff566
TH
631 iowrite8(0xaa, ioaddr->nsect_addr);
632 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 633
0d5ff566
TH
634 iowrite8(0x55, ioaddr->nsect_addr);
635 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 636
0d5ff566
TH
637 nsect = ioread8(ioaddr->nsect_addr);
638 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
639
640 if ((nsect == 0x55) && (lbal == 0xaa))
641 return 1; /* we found a device */
642
643 return 0; /* nothing found */
644}
645
1da177e4
LT
646/**
647 * ata_dev_classify - determine device type based on ATA-spec signature
648 * @tf: ATA taskfile register set for device to be identified
649 *
650 * Determine from taskfile register contents whether a device is
651 * ATA or ATAPI, as per "Signature and persistence" section
652 * of ATA/PI spec (volume 1, sect 5.14).
653 *
654 * LOCKING:
655 * None.
656 *
657 * RETURNS:
658 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
659 * the event of failure.
660 */
661
057ace5e 662unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
663{
664 /* Apple's open source Darwin code hints that some devices only
665 * put a proper signature into the LBA mid/high registers,
666 * So, we only check those. It's sufficient for uniqueness.
667 */
668
669 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
670 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
671 DPRINTK("found ATA device by sig\n");
672 return ATA_DEV_ATA;
673 }
674
675 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
676 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
677 DPRINTK("found ATAPI device by sig\n");
678 return ATA_DEV_ATAPI;
679 }
680
681 DPRINTK("unknown device\n");
682 return ATA_DEV_UNKNOWN;
683}
684
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	/* the device must be selected before its shadow regs are read */
	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;	/* feature reg doubles as the error reg */
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* a zero status on a "found" ATA device means nothing is there */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
743
744/**
6a62a04d 745 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
746 * @id: IDENTIFY DEVICE results we will examine
747 * @s: string into which data is output
748 * @ofs: offset into identify device page
749 * @len: length of string to return. must be an even number.
750 *
751 * The strings in the IDENTIFY DEVICE page are broken up into
752 * 16-bit chunks. Run through the string, and output each
753 * 8-bit chunk linearly, regardless of platform.
754 *
755 * LOCKING:
756 * caller.
757 */
758
6a62a04d
TH
759void ata_id_string(const u16 *id, unsigned char *s,
760 unsigned int ofs, unsigned int len)
1da177e4
LT
761{
762 unsigned int c;
763
764 while (len > 0) {
765 c = id[ofs] >> 8;
766 *s = c;
767 s++;
768
769 c = id[ofs] & 0xff;
770 *s = c;
771 s++;
772
773 ofs++;
774 len -= 2;
775 }
776}
777
0e949ff3 778/**
6a62a04d 779 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
780 * @id: IDENTIFY DEVICE results we will examine
781 * @s: string into which data is output
782 * @ofs: offset into identify device page
783 * @len: length of string to return. must be an odd number.
784 *
6a62a04d 785 * This function is identical to ata_id_string except that it
0e949ff3
TH
786 * trims trailing spaces and terminates the resulting string with
787 * null. @len must be actual maximum length (even number) + 1.
788 *
789 * LOCKING:
790 * caller.
791 */
6a62a04d
TH
792void ata_id_c_string(const u16 *id, unsigned char *s,
793 unsigned int ofs, unsigned int len)
0e949ff3
TH
794{
795 unsigned char *p;
796
797 WARN_ON(!(len & 1));
798
6a62a04d 799 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
800
801 p = s + strnlen(s, len - 1);
802 while (p > s && p[-1] == ' ')
803 p--;
804 *p = '\0';
805}
0baab86b 806
2940740b
TH
807static u64 ata_id_n_sectors(const u16 *id)
808{
809 if (ata_id_has_lba(id)) {
810 if (ata_id_has_lba48(id))
811 return ata_id_u64(id, 100);
812 else
813 return ata_id_u32(id, 60);
814 } else {
815 if (ata_id_current_chs_valid(id))
816 return ata_id_u32(id, 57);
817 else
818 return id[1] * id[3] * id[6];
819 }
820}
821
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations
 *	by hosts that do not need explicit device selection.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
837
0baab86b 838
1da177e4
LT
839/**
840 * ata_std_dev_select - Select device 0/1 on ATA bus
841 * @ap: ATA channel to manipulate
842 * @device: ATA device (numbered from zero) to select
843 *
844 * Use the method defined in the ATA specification to
845 * make either device 0, or device 1, active on the
0baab86b
EF
846 * ATA channel. Works with both PIO and MMIO.
847 *
848 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
849 *
850 * LOCKING:
851 * caller.
852 */
853
854void ata_std_dev_select (struct ata_port *ap, unsigned int device)
855{
856 u8 tmp;
857
858 if (device == 0)
859 tmp = ATA_DEVICE_OBS;
860 else
861 tmp = ATA_DEVICE_OBS | ATA_DEV1;
862
0d5ff566 863 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
864 ata_pause(ap); /* needed; also flushes, for mmio */
865}
866
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
				"device %u, wait %u\n", ap->id, device, wait);

	/* let the port go idle before switching devices */
	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* give ATAPI devices 150ms to settle after selection,
		 * but only when the context allows sleeping */
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
904
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page (see the ATA spec for the meaning of each word).
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
943
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 reports PIO3/4 support in its low two bits;
		 * modes 0-2 are always assumed supported (0x7).
		 */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes: word 163 holds
		 * extra PIO modes in bits 2:0 and extra DMA modes in
		 * bits 5:3.
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* UDMA modes (word 88) are only valid if word 53 bit 2 is set */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1012
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	/* a flush is in progress; don't queue new work */
	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	/* bind the work function and its argument to the port task */
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
1048
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* Raise the flush flag first so a running task cannot requeue
	 * itself after we start flushing.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		/* cancel failed: work is already running; flush again
		 * so it finishes before we clear the flag */
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1091
/* Completion callback for internal commands: wake the waiter that
 * ata_exec_internal_sg() parked on the completion stored in
 * qc->private_data.
 */
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
1098
1099/**
2432697b 1100 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1101 * @dev: Device to which the command is sent
1102 * @tf: Taskfile registers for the command and the result
d69cf37d 1103 * @cdb: CDB for packet command
a2a7a662 1104 * @dma_dir: Data tranfer direction of the command
2432697b
TH
1105 * @sg: sg list for the data buffer of the command
1106 * @n_elem: Number of sg entries
a2a7a662
TH
1107 *
1108 * Executes libata internal command with timeout. @tf contains
1109 * command on entry and result on return. Timeout and error
1110 * conditions are reported via return value. No recovery action
1111 * is taken after a command times out. It's caller's duty to
1112 * clean up after timeout.
1113 *
1114 * LOCKING:
1115 * None. Should be called with kernel context, might sleep.
551e8889
TH
1116 *
1117 * RETURNS:
1118 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1119 */
2432697b
TH
1120unsigned ata_exec_internal_sg(struct ata_device *dev,
1121 struct ata_taskfile *tf, const u8 *cdb,
1122 int dma_dir, struct scatterlist *sg,
1123 unsigned int n_elem)
a2a7a662 1124{
3373efd8 1125 struct ata_port *ap = dev->ap;
a2a7a662
TH
1126 u8 command = tf->command;
1127 struct ata_queued_cmd *qc;
2ab7db1f 1128 unsigned int tag, preempted_tag;
dedaf2b0 1129 u32 preempted_sactive, preempted_qc_active;
60be6b9a 1130 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1131 unsigned long flags;
77853bf2 1132 unsigned int err_mask;
d95a717f 1133 int rc;
a2a7a662 1134
ba6a1308 1135 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1136
e3180499 1137 /* no internal command while frozen */
b51e9e5d 1138 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1139 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1140 return AC_ERR_SYSTEM;
1141 }
1142
2ab7db1f 1143 /* initialize internal qc */
a2a7a662 1144
2ab7db1f
TH
1145 /* XXX: Tag 0 is used for drivers with legacy EH as some
1146 * drivers choke if any other tag is given. This breaks
1147 * ata_tag_internal() test for those drivers. Don't use new
1148 * EH stuff without converting to it.
1149 */
1150 if (ap->ops->error_handler)
1151 tag = ATA_TAG_INTERNAL;
1152 else
1153 tag = 0;
1154
6cec4a39 1155 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1156 BUG();
f69499f4 1157 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1158
1159 qc->tag = tag;
1160 qc->scsicmd = NULL;
1161 qc->ap = ap;
1162 qc->dev = dev;
1163 ata_qc_reinit(qc);
1164
1165 preempted_tag = ap->active_tag;
dedaf2b0
TH
1166 preempted_sactive = ap->sactive;
1167 preempted_qc_active = ap->qc_active;
2ab7db1f 1168 ap->active_tag = ATA_TAG_POISON;
dedaf2b0
TH
1169 ap->sactive = 0;
1170 ap->qc_active = 0;
2ab7db1f
TH
1171
1172 /* prepare & issue qc */
a2a7a662 1173 qc->tf = *tf;
d69cf37d
TH
1174 if (cdb)
1175 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1176 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1177 qc->dma_dir = dma_dir;
1178 if (dma_dir != DMA_NONE) {
2432697b
TH
1179 unsigned int i, buflen = 0;
1180
1181 for (i = 0; i < n_elem; i++)
1182 buflen += sg[i].length;
1183
1184 ata_sg_init(qc, sg, n_elem);
49c80429 1185 qc->nbytes = buflen;
a2a7a662
TH
1186 }
1187
77853bf2 1188 qc->private_data = &wait;
a2a7a662
TH
1189 qc->complete_fn = ata_qc_complete_internal;
1190
8e0e694a 1191 ata_qc_issue(qc);
a2a7a662 1192
ba6a1308 1193 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1194
a8601e5f 1195 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
d95a717f
TH
1196
1197 ata_port_flush_task(ap);
41ade50c 1198
d95a717f 1199 if (!rc) {
ba6a1308 1200 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1201
1202 /* We're racing with irq here. If we lose, the
1203 * following test prevents us from completing the qc
d95a717f
TH
1204 * twice. If we win, the port is frozen and will be
1205 * cleaned up by ->post_internal_cmd().
a2a7a662 1206 */
77853bf2 1207 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1208 qc->err_mask |= AC_ERR_TIMEOUT;
1209
1210 if (ap->ops->error_handler)
1211 ata_port_freeze(ap);
1212 else
1213 ata_qc_complete(qc);
f15a1daf 1214
0dd4b21f
BP
1215 if (ata_msg_warn(ap))
1216 ata_dev_printk(dev, KERN_WARNING,
88574551 1217 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1218 }
1219
ba6a1308 1220 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1221 }
1222
d95a717f
TH
1223 /* do post_internal_cmd */
1224 if (ap->ops->post_internal_cmd)
1225 ap->ops->post_internal_cmd(qc);
1226
18d90deb 1227 if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
0dd4b21f 1228 if (ata_msg_warn(ap))
88574551 1229 ata_dev_printk(dev, KERN_WARNING,
0dd4b21f 1230 "zero err_mask for failed "
88574551 1231 "internal command, assuming AC_ERR_OTHER\n");
d95a717f
TH
1232 qc->err_mask |= AC_ERR_OTHER;
1233 }
1234
15869303 1235 /* finish up */
ba6a1308 1236 spin_lock_irqsave(ap->lock, flags);
15869303 1237
e61e0672 1238 *tf = qc->result_tf;
77853bf2
TH
1239 err_mask = qc->err_mask;
1240
1241 ata_qc_free(qc);
2ab7db1f 1242 ap->active_tag = preempted_tag;
dedaf2b0
TH
1243 ap->sactive = preempted_sactive;
1244 ap->qc_active = preempted_qc_active;
77853bf2 1245
1f7dd3e9
TH
1246 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1247 * Until those drivers are fixed, we detect the condition
1248 * here, fail the command with AC_ERR_SYSTEM and reenable the
1249 * port.
1250 *
1251 * Note that this doesn't change any behavior as internal
1252 * command failure results in disabling the device in the
1253 * higher layer for LLDDs without new reset/EH callbacks.
1254 *
1255 * Kill the following code as soon as those drivers are fixed.
1256 */
198e0fed 1257 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1258 err_mask |= AC_ERR_SYSTEM;
1259 ata_port_probe(ap);
1260 }
1261
ba6a1308 1262 spin_unlock_irqrestore(ap->lock, flags);
15869303 1263
77853bf2 1264 return err_mask;
a2a7a662
TH
1265}
1266
2432697b 1267/**
33480a0e 1268 * ata_exec_internal - execute libata internal command
2432697b
TH
1269 * @dev: Device to which the command is sent
1270 * @tf: Taskfile registers for the command and the result
1271 * @cdb: CDB for packet command
1272 * @dma_dir: Data tranfer direction of the command
1273 * @buf: Data buffer of the command
1274 * @buflen: Length of data buffer
1275 *
1276 * Wrapper around ata_exec_internal_sg() which takes simple
1277 * buffer instead of sg list.
1278 *
1279 * LOCKING:
1280 * None. Should be called with kernel context, might sleep.
1281 *
1282 * RETURNS:
1283 * Zero on success, AC_ERR_* mask on failure
1284 */
1285unsigned ata_exec_internal(struct ata_device *dev,
1286 struct ata_taskfile *tf, const u8 *cdb,
1287 int dma_dir, void *buf, unsigned int buflen)
1288{
33480a0e
TH
1289 struct scatterlist *psg = NULL, sg;
1290 unsigned int n_elem = 0;
2432697b 1291
33480a0e
TH
1292 if (dma_dir != DMA_NONE) {
1293 WARN_ON(!buf);
1294 sg_init_one(&sg, buf, buflen);
1295 psg = &sg;
1296 n_elem++;
1297 }
2432697b 1298
33480a0e 1299 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1300}
1301
977e6b9f
TH
1302/**
1303 * ata_do_simple_cmd - execute simple internal command
1304 * @dev: Device to which the command is sent
1305 * @cmd: Opcode to execute
1306 *
1307 * Execute a 'simple' command, that only consists of the opcode
1308 * 'cmd' itself, without filling any other registers
1309 *
1310 * LOCKING:
1311 * Kernel thread context (may sleep).
1312 *
1313 * RETURNS:
1314 * Zero on success, AC_ERR_* mask on failure
e58eb583 1315 */
77b08fb5 1316unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1317{
1318 struct ata_taskfile tf;
e58eb583
TH
1319
1320 ata_tf_init(dev, &tf);
1321
1322 tf.command = cmd;
1323 tf.flags |= ATA_TFLAG_DEVICE;
1324 tf.protocol = ATA_PROT_NODATA;
1325
977e6b9f 1326 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1327}
1328
1bc4ccff
AC
1329/**
1330 * ata_pio_need_iordy - check if iordy needed
1331 * @adev: ATA device
1332 *
1333 * Check if the current speed of the device requires IORDY. Used
1334 * by various controllers for chip configuration.
1335 */
1336
1337unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1338{
1339 int pio;
1340 int speed = adev->pio_mode - XFER_PIO_0;
1341
1342 if (speed < 2)
1343 return 0;
1344 if (speed > 2)
1345 return 1;
2e9edbf8 1346
1bc4ccff
AC
1347 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1348
1349 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1350 pio = adev->id[ATA_ID_EIDE_PIO];
1351 /* Is the speed faster than the drive allows non IORDY ? */
1352 if (pio) {
1353 /* This is cycle times not frequency - watch the logic! */
1354 if (pio > 240) /* PIO2 is 240nS per cycle */
1355 return 1;
1356 return 0;
1357 }
1358 }
1359 return 0;
1360}
1361
1da177e4 1362/**
49016aca 1363 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1364 * @dev: target device
1365 * @p_class: pointer to class of the target device (may be changed)
bff04647 1366 * @flags: ATA_READID_* flags
fe635c7e 1367 * @id: buffer to read IDENTIFY data into
1da177e4 1368 *
49016aca
TH
1369 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1370 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1371 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1372 * for pre-ATA4 drives.
1da177e4
LT
1373 *
1374 * LOCKING:
49016aca
TH
1375 * Kernel thread context (may sleep)
1376 *
1377 * RETURNS:
1378 * 0 on success, -errno otherwise.
1da177e4 1379 */
a9beec95 1380int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1381 unsigned int flags, u16 *id)
1da177e4 1382{
3373efd8 1383 struct ata_port *ap = dev->ap;
49016aca 1384 unsigned int class = *p_class;
a0123703 1385 struct ata_taskfile tf;
49016aca
TH
1386 unsigned int err_mask = 0;
1387 const char *reason;
1388 int rc;
1da177e4 1389
0dd4b21f 1390 if (ata_msg_ctl(ap))
88574551
TH
1391 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1392 __FUNCTION__, ap->id, dev->devno);
1da177e4 1393
49016aca 1394 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1da177e4 1395
49016aca 1396 retry:
3373efd8 1397 ata_tf_init(dev, &tf);
a0123703 1398
49016aca
TH
1399 switch (class) {
1400 case ATA_DEV_ATA:
a0123703 1401 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1402 break;
1403 case ATA_DEV_ATAPI:
a0123703 1404 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1405 break;
1406 default:
1407 rc = -ENODEV;
1408 reason = "unsupported class";
1409 goto err_out;
1da177e4
LT
1410 }
1411
a0123703 1412 tf.protocol = ATA_PROT_PIO;
800b3996 1413 tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */
1da177e4 1414
3373efd8 1415 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
49016aca 1416 id, sizeof(id[0]) * ATA_ID_WORDS);
a0123703 1417 if (err_mask) {
800b3996 1418 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8
TH
1419 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1420 ap->id, dev->devno);
1421 return -ENOENT;
1422 }
1423
49016aca
TH
1424 rc = -EIO;
1425 reason = "I/O error";
1da177e4
LT
1426 goto err_out;
1427 }
1428
49016aca 1429 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1430
49016aca 1431 /* sanity check */
a4f5749b
TH
1432 rc = -EINVAL;
1433 reason = "device reports illegal type";
1434
1435 if (class == ATA_DEV_ATA) {
1436 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1437 goto err_out;
1438 } else {
1439 if (ata_id_is_ata(id))
1440 goto err_out;
49016aca
TH
1441 }
1442
bff04647 1443 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1444 /*
1445 * The exact sequence expected by certain pre-ATA4 drives is:
1446 * SRST RESET
1447 * IDENTIFY
1448 * INITIALIZE DEVICE PARAMETERS
1449 * anything else..
1450 * Some drives were very specific about that exact sequence.
1451 */
1452 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 1453 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
1454 if (err_mask) {
1455 rc = -EIO;
1456 reason = "INIT_DEV_PARAMS failed";
1457 goto err_out;
1458 }
1459
1460 /* current CHS translation info (id[53-58]) might be
1461 * changed. reread the identify device info.
1462 */
bff04647 1463 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
1464 goto retry;
1465 }
1466 }
1467
1468 *p_class = class;
fe635c7e 1469
49016aca
TH
1470 return 0;
1471
1472 err_out:
88574551 1473 if (ata_msg_warn(ap))
0dd4b21f 1474 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 1475 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
1476 return rc;
1477}
1478
3373efd8 1479static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1480{
3373efd8 1481 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1482}
1483
a6e6ce8e
TH
1484static void ata_dev_config_ncq(struct ata_device *dev,
1485 char *desc, size_t desc_sz)
1486{
1487 struct ata_port *ap = dev->ap;
1488 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1489
1490 if (!ata_id_has_ncq(dev->id)) {
1491 desc[0] = '\0';
1492 return;
1493 }
6919a0a6
AC
1494 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1495 snprintf(desc, desc_sz, "NCQ (not used)");
1496 return;
1497 }
a6e6ce8e 1498 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1499 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1500 dev->flags |= ATA_DFLAG_NCQ;
1501 }
1502
1503 if (hdepth >= ddepth)
1504 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1505 else
1506 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1507}
1508
e6d902a3
BK
1509static void ata_set_port_max_cmd_len(struct ata_port *ap)
1510{
1511 int i;
1512
cca3974e
JG
1513 if (ap->scsi_host) {
1514 unsigned int len = 0;
1515
e6d902a3 1516 for (i = 0; i < ATA_MAX_DEVICES; i++)
cca3974e
JG
1517 len = max(len, ap->device[i].cdb_len);
1518
1519 ap->scsi_host->max_cmd_len = len;
e6d902a3
BK
1520 }
1521}
1522
49016aca 1523/**
ffeae418 1524 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1525 * @dev: Target device to configure
1526 *
1527 * Configure @dev according to @dev->id. Generic and low-level
1528 * driver specific fixups are also applied.
49016aca
TH
1529 *
1530 * LOCKING:
ffeae418
TH
1531 * Kernel thread context (may sleep)
1532 *
1533 * RETURNS:
1534 * 0 on success, -errno otherwise
49016aca 1535 */
efdaedc4 1536int ata_dev_configure(struct ata_device *dev)
49016aca 1537{
3373efd8 1538 struct ata_port *ap = dev->ap;
efdaedc4 1539 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1540 const u16 *id = dev->id;
ff8854b2 1541 unsigned int xfer_mask;
b352e57d 1542 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1543 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1544 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1545 int rc;
49016aca 1546
0dd4b21f 1547 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
88574551
TH
1548 ata_dev_printk(dev, KERN_INFO,
1549 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1550 __FUNCTION__, ap->id, dev->devno);
ffeae418 1551 return 0;
49016aca
TH
1552 }
1553
0dd4b21f 1554 if (ata_msg_probe(ap))
88574551
TH
1555 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1556 __FUNCTION__, ap->id, dev->devno);
1da177e4 1557
c39f5ebe 1558 /* print device capabilities */
0dd4b21f 1559 if (ata_msg_probe(ap))
88574551
TH
1560 ata_dev_printk(dev, KERN_DEBUG,
1561 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1562 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1563 __FUNCTION__,
f15a1daf
TH
1564 id[49], id[82], id[83], id[84],
1565 id[85], id[86], id[87], id[88]);
c39f5ebe 1566
208a9933 1567 /* initialize to-be-configured parameters */
ea1dd4e1 1568 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1569 dev->max_sectors = 0;
1570 dev->cdb_len = 0;
1571 dev->n_sectors = 0;
1572 dev->cylinders = 0;
1573 dev->heads = 0;
1574 dev->sectors = 0;
1575
1da177e4
LT
1576 /*
1577 * common ATA, ATAPI feature tests
1578 */
1579
ff8854b2 1580 /* find max transfer mode; for printk only */
1148c3a7 1581 xfer_mask = ata_id_xfermask(id);
1da177e4 1582
0dd4b21f
BP
1583 if (ata_msg_probe(ap))
1584 ata_dump_id(id);
1da177e4
LT
1585
1586 /* ATA-specific feature tests */
1587 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1588 if (ata_id_is_cfa(id)) {
1589 if (id[162] & 1) /* CPRM may make this media unusable */
1590 ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessable.\n",
1591 ap->id, dev->devno);
1592 snprintf(revbuf, 7, "CFA");
1593 }
1594 else
1595 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1596
1148c3a7 1597 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1598
3f64f565
EM
1599 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1600 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV_OFS,
1601 sizeof(fwrevbuf));
1602
1603 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD_OFS,
1604 sizeof(modelbuf));
1605
1606 if (dev->id[59] & 0x100)
1607 dev->multi_count = dev->id[59] & 0xff;
1608
1148c3a7 1609 if (ata_id_has_lba(id)) {
4c2d721a 1610 const char *lba_desc;
a6e6ce8e 1611 char ncq_desc[20];
8bf62ece 1612
4c2d721a
TH
1613 lba_desc = "LBA";
1614 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1615 if (ata_id_has_lba48(id)) {
8bf62ece 1616 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1617 lba_desc = "LBA48";
6fc49adb
TH
1618
1619 if (dev->n_sectors >= (1UL << 28) &&
1620 ata_id_has_flush_ext(id))
1621 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1622 }
8bf62ece 1623
a6e6ce8e
TH
1624 /* config NCQ */
1625 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1626
8bf62ece 1627 /* print device info to dmesg */
3f64f565
EM
1628 if (ata_msg_drv(ap) && print_info) {
1629 ata_dev_printk(dev, KERN_INFO,
1630 "%s: %s, %s, max %s\n",
1631 revbuf, modelbuf, fwrevbuf,
1632 ata_mode_string(xfer_mask));
1633 ata_dev_printk(dev, KERN_INFO,
1634 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 1635 (unsigned long long)dev->n_sectors,
3f64f565
EM
1636 dev->multi_count, lba_desc, ncq_desc);
1637 }
ffeae418 1638 } else {
8bf62ece
AL
1639 /* CHS */
1640
1641 /* Default translation */
1148c3a7
TH
1642 dev->cylinders = id[1];
1643 dev->heads = id[3];
1644 dev->sectors = id[6];
8bf62ece 1645
1148c3a7 1646 if (ata_id_current_chs_valid(id)) {
8bf62ece 1647 /* Current CHS translation is valid. */
1148c3a7
TH
1648 dev->cylinders = id[54];
1649 dev->heads = id[55];
1650 dev->sectors = id[56];
8bf62ece
AL
1651 }
1652
1653 /* print device info to dmesg */
3f64f565 1654 if (ata_msg_drv(ap) && print_info) {
88574551 1655 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
1656 "%s: %s, %s, max %s\n",
1657 revbuf, modelbuf, fwrevbuf,
1658 ata_mode_string(xfer_mask));
1659 ata_dev_printk(dev, KERN_INFO,
1660 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1661 (unsigned long long)dev->n_sectors,
1662 dev->multi_count, dev->cylinders,
1663 dev->heads, dev->sectors);
1664 }
07f6f7d0
AL
1665 }
1666
6e7846e9 1667 dev->cdb_len = 16;
1da177e4
LT
1668 }
1669
1670 /* ATAPI-specific feature tests */
2c13b7ce 1671 else if (dev->class == ATA_DEV_ATAPI) {
08a556db
AL
1672 char *cdb_intr_string = "";
1673
1148c3a7 1674 rc = atapi_cdb_len(id);
1da177e4 1675 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 1676 if (ata_msg_warn(ap))
88574551
TH
1677 ata_dev_printk(dev, KERN_WARNING,
1678 "unsupported CDB len\n");
ffeae418 1679 rc = -EINVAL;
1da177e4
LT
1680 goto err_out_nosup;
1681 }
6e7846e9 1682 dev->cdb_len = (unsigned int) rc;
1da177e4 1683
08a556db 1684 if (ata_id_cdb_intr(dev->id)) {
312f7da2 1685 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
1686 cdb_intr_string = ", CDB intr";
1687 }
312f7da2 1688
1da177e4 1689 /* print device info to dmesg */
5afc8142 1690 if (ata_msg_drv(ap) && print_info)
12436c30
TH
1691 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1692 ata_mode_string(xfer_mask),
1693 cdb_intr_string);
1da177e4
LT
1694 }
1695
914ed354
TH
1696 /* determine max_sectors */
1697 dev->max_sectors = ATA_MAX_SECTORS;
1698 if (dev->flags & ATA_DFLAG_LBA48)
1699 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1700
93590859
AC
1701 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1702 /* Let the user know. We don't want to disallow opens for
1703 rescue purposes, or in case the vendor is just a blithering
1704 idiot */
1705 if (print_info) {
1706 ata_dev_printk(dev, KERN_WARNING,
1707"Drive reports diagnostics failure. This may indicate a drive\n");
1708 ata_dev_printk(dev, KERN_WARNING,
1709"fault or invalid emulation. Contact drive vendor for information.\n");
1710 }
1711 }
1712
e6d902a3 1713 ata_set_port_max_cmd_len(ap);
6e7846e9 1714
4b2f3ede 1715 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 1716 if (ata_dev_knobble(dev)) {
5afc8142 1717 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
1718 ata_dev_printk(dev, KERN_INFO,
1719 "applying bridge limits\n");
5a529139 1720 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
1721 dev->max_sectors = ATA_MAX_SECTORS;
1722 }
1723
1724 if (ap->ops->dev_config)
1725 ap->ops->dev_config(ap, dev);
1726
0dd4b21f
BP
1727 if (ata_msg_probe(ap))
1728 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1729 __FUNCTION__, ata_chk_status(ap));
ffeae418 1730 return 0;
1da177e4
LT
1731
1732err_out_nosup:
0dd4b21f 1733 if (ata_msg_probe(ap))
88574551
TH
1734 ata_dev_printk(dev, KERN_DEBUG,
1735 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 1736 return rc;
1da177e4
LT
1737}
1738
1739/**
1740 * ata_bus_probe - Reset and probe ATA bus
1741 * @ap: Bus to probe
1742 *
0cba632b
JG
1743 * Master ATA bus probing function. Initiates a hardware-dependent
1744 * bus reset, then attempts to identify any devices found on
1745 * the bus.
1746 *
1da177e4 1747 * LOCKING:
0cba632b 1748 * PCI/etc. bus probe sem.
1da177e4
LT
1749 *
1750 * RETURNS:
96072e69 1751 * Zero on success, negative errno otherwise.
1da177e4
LT
1752 */
1753
80289167 1754int ata_bus_probe(struct ata_port *ap)
1da177e4 1755{
28ca5c57 1756 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1
TH
1757 int tries[ATA_MAX_DEVICES];
1758 int i, rc, down_xfermask;
e82cbdb9 1759 struct ata_device *dev;
1da177e4 1760
28ca5c57 1761 ata_port_probe(ap);
c19ba8af 1762
14d2bac1
TH
1763 for (i = 0; i < ATA_MAX_DEVICES; i++)
1764 tries[i] = ATA_PROBE_MAX_TRIES;
1765
1766 retry:
1767 down_xfermask = 0;
1768
2044470c 1769 /* reset and determine device classes */
52783c5d 1770 ap->ops->phy_reset(ap);
2061a47a 1771
52783c5d
TH
1772 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1773 dev = &ap->device[i];
c19ba8af 1774
52783c5d
TH
1775 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1776 dev->class != ATA_DEV_UNKNOWN)
1777 classes[dev->devno] = dev->class;
1778 else
1779 classes[dev->devno] = ATA_DEV_NONE;
2044470c 1780
52783c5d 1781 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 1782 }
1da177e4 1783
52783c5d 1784 ata_port_probe(ap);
2044470c 1785
b6079ca4
AC
1786 /* after the reset the device state is PIO 0 and the controller
1787 state is undefined. Record the mode */
1788
1789 for (i = 0; i < ATA_MAX_DEVICES; i++)
1790 ap->device[i].pio_mode = XFER_PIO_0;
1791
28ca5c57 1792 /* read IDENTIFY page and configure devices */
1da177e4 1793 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e82cbdb9 1794 dev = &ap->device[i];
28ca5c57 1795
ec573755
TH
1796 if (tries[i])
1797 dev->class = classes[i];
ffeae418 1798
14d2bac1 1799 if (!ata_dev_enabled(dev))
ffeae418 1800 continue;
ffeae418 1801
bff04647
TH
1802 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
1803 dev->id);
14d2bac1
TH
1804 if (rc)
1805 goto fail;
1806
efdaedc4
TH
1807 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1808 rc = ata_dev_configure(dev);
1809 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
1810 if (rc)
1811 goto fail;
1da177e4
LT
1812 }
1813
e82cbdb9 1814 /* configure transfer mode */
3adcebb2 1815 rc = ata_set_mode(ap, &dev);
51713d35
TH
1816 if (rc) {
1817 down_xfermask = 1;
1818 goto fail;
e82cbdb9 1819 }
1da177e4 1820
e82cbdb9
TH
1821 for (i = 0; i < ATA_MAX_DEVICES; i++)
1822 if (ata_dev_enabled(&ap->device[i]))
1823 return 0;
1da177e4 1824
e82cbdb9
TH
1825 /* no device present, disable port */
1826 ata_port_disable(ap);
1da177e4 1827 ap->ops->port_disable(ap);
96072e69 1828 return -ENODEV;
14d2bac1
TH
1829
1830 fail:
1831 switch (rc) {
1832 case -EINVAL:
1833 case -ENODEV:
1834 tries[dev->devno] = 0;
1835 break;
1836 case -EIO:
3c567b7d 1837 sata_down_spd_limit(ap);
14d2bac1
TH
1838 /* fall through */
1839 default:
1840 tries[dev->devno]--;
1841 if (down_xfermask &&
3373efd8 1842 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
14d2bac1
TH
1843 tries[dev->devno] = 0;
1844 }
1845
ec573755 1846 if (!tries[dev->devno]) {
3373efd8
TH
1847 ata_down_xfermask_limit(dev, 1);
1848 ata_dev_disable(dev);
ec573755
TH
1849 }
1850
14d2bac1 1851 goto retry;
1da177e4
LT
1852}
1853
1854/**
0cba632b
JG
1855 * ata_port_probe - Mark port as enabled
1856 * @ap: Port for which we indicate enablement
1da177e4 1857 *
0cba632b
JG
1858 * Modify @ap data structure such that the system
1859 * thinks that the entire port is enabled.
1860 *
cca3974e 1861 * LOCKING: host lock, or some other form of
0cba632b 1862 * serialization.
1da177e4
LT
1863 */
1864
1865void ata_port_probe(struct ata_port *ap)
1866{
198e0fed 1867 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1868}
1869
3be680b7
TH
1870/**
1871 * sata_print_link_status - Print SATA link status
1872 * @ap: SATA port to printk link status about
1873 *
1874 * This function prints link speed and status of a SATA link.
1875 *
1876 * LOCKING:
1877 * None.
1878 */
1879static void sata_print_link_status(struct ata_port *ap)
1880{
6d5f9732 1881 u32 sstatus, scontrol, tmp;
3be680b7 1882
81952c54 1883 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1884 return;
81952c54 1885 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1886
81952c54 1887 if (ata_port_online(ap)) {
3be680b7 1888 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1889 ata_port_printk(ap, KERN_INFO,
1890 "SATA link up %s (SStatus %X SControl %X)\n",
1891 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1892 } else {
f15a1daf
TH
1893 ata_port_printk(ap, KERN_INFO,
1894 "SATA link down (SStatus %X SControl %X)\n",
1895 sstatus, scontrol);
3be680b7
TH
1896 }
1897}
1898
1da177e4 1899/**
780a87f7
JG
1900 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1901 * @ap: SATA port associated with target SATA PHY.
1da177e4 1902 *
780a87f7
JG
1903 * This function issues commands to standard SATA Sxxx
1904 * PHY registers, to wake up the phy (and device), and
1905 * clear any reset condition.
1da177e4
LT
1906 *
1907 * LOCKING:
0cba632b 1908 * PCI/etc. bus probe sem.
1da177e4
LT
1909 *
1910 */
1911void __sata_phy_reset(struct ata_port *ap)
1912{
1913 u32 sstatus;
1914 unsigned long timeout = jiffies + (HZ * 5);
1915
1916 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 1917 /* issue phy wake/reset */
81952c54 1918 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
1919 /* Couldn't find anything in SATA I/II specs, but
1920 * AHCI-1.1 10.4.2 says at least 1 ms. */
1921 mdelay(1);
1da177e4 1922 }
81952c54
TH
1923 /* phy wake/clear reset */
1924 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
1925
1926 /* wait for phy to become ready, if necessary */
1927 do {
1928 msleep(200);
81952c54 1929 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
1930 if ((sstatus & 0xf) != 1)
1931 break;
1932 } while (time_before(jiffies, timeout));
1933
3be680b7
TH
1934 /* print link status */
1935 sata_print_link_status(ap);
656563e3 1936
3be680b7 1937 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 1938 if (!ata_port_offline(ap))
1da177e4 1939 ata_port_probe(ap);
3be680b7 1940 else
1da177e4 1941 ata_port_disable(ap);
1da177e4 1942
198e0fed 1943 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1944 return;
1945
1946 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1947 ata_port_disable(ap);
1948 return;
1949 }
1950
1951 ap->cbl = ATA_CBL_SATA;
1952}
1953
1954/**
780a87f7
JG
1955 * sata_phy_reset - Reset SATA bus.
1956 * @ap: SATA port associated with target SATA PHY.
1da177e4 1957 *
780a87f7
JG
1958 * This function resets the SATA bus, and then probes
1959 * the bus for devices.
1da177e4
LT
1960 *
1961 * LOCKING:
0cba632b 1962 * PCI/etc. bus probe sem.
1da177e4
LT
1963 *
1964 */
1965void sata_phy_reset(struct ata_port *ap)
1966{
1967 __sata_phy_reset(ap);
198e0fed 1968 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1969 return;
1970 ata_bus_reset(ap);
1971}
1972
ebdfca6e
AC
1973/**
1974 * ata_dev_pair - return other device on cable
ebdfca6e
AC
1975 * @adev: device
1976 *
1977 * Obtain the other device on the same cable, or if none is
1978 * present NULL is returned
1979 */
2e9edbf8 1980
3373efd8 1981struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 1982{
3373efd8 1983 struct ata_port *ap = adev->ap;
ebdfca6e 1984 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 1985 if (!ata_dev_enabled(pair))
ebdfca6e
AC
1986 return NULL;
1987 return pair;
1988}
1989
1da177e4 1990/**
780a87f7
JG
1991 * ata_port_disable - Disable port.
1992 * @ap: Port to be disabled.
1da177e4 1993 *
780a87f7
JG
1994 * Modify @ap data structure such that the system
1995 * thinks that the entire port is disabled, and should
1996 * never attempt to probe or communicate with devices
1997 * on this port.
1998 *
cca3974e 1999 * LOCKING: host lock, or some other form of
780a87f7 2000 * serialization.
1da177e4
LT
2001 */
2002
2003void ata_port_disable(struct ata_port *ap)
2004{
2005 ap->device[0].class = ATA_DEV_NONE;
2006 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2007 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2008}
2009
1c3fae4d 2010/**
3c567b7d 2011 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2012 * @ap: Port to adjust SATA spd limit for
2013 *
2014 * Adjust SATA spd limit of @ap downward. Note that this
2015 * function only adjusts the limit. The change must be applied
3c567b7d 2016 * using sata_set_spd().
1c3fae4d
TH
2017 *
2018 * LOCKING:
2019 * Inherited from caller.
2020 *
2021 * RETURNS:
2022 * 0 on success, negative errno on failure
2023 */
3c567b7d 2024int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2025{
81952c54
TH
2026 u32 sstatus, spd, mask;
2027 int rc, highbit;
1c3fae4d 2028
81952c54
TH
2029 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2030 if (rc)
2031 return rc;
1c3fae4d
TH
2032
2033 mask = ap->sata_spd_limit;
2034 if (mask <= 1)
2035 return -EINVAL;
2036 highbit = fls(mask) - 1;
2037 mask &= ~(1 << highbit);
2038
81952c54 2039 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2040 if (spd <= 1)
2041 return -EINVAL;
2042 spd--;
2043 mask &= (1 << spd) - 1;
2044 if (!mask)
2045 return -EINVAL;
2046
2047 ap->sata_spd_limit = mask;
2048
f15a1daf
TH
2049 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2050 sata_spd_string(fls(mask)));
1c3fae4d
TH
2051
2052 return 0;
2053}
2054
3c567b7d 2055static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2056{
2057 u32 spd, limit;
2058
2059 if (ap->sata_spd_limit == UINT_MAX)
2060 limit = 0;
2061 else
2062 limit = fls(ap->sata_spd_limit);
2063
2064 spd = (*scontrol >> 4) & 0xf;
2065 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2066
2067 return spd != limit;
2068}
2069
2070/**
3c567b7d 2071 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2072 * @ap: Port in question
2073 *
2074 * Test whether the spd limit in SControl matches
2075 * @ap->sata_spd_limit. This function is used to determine
2076 * whether hardreset is necessary to apply SATA spd
2077 * configuration.
2078 *
2079 * LOCKING:
2080 * Inherited from caller.
2081 *
2082 * RETURNS:
2083 * 1 if SATA spd configuration is needed, 0 otherwise.
2084 */
3c567b7d 2085int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2086{
2087 u32 scontrol;
2088
81952c54 2089 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2090 return 0;
2091
3c567b7d 2092 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2093}
2094
2095/**
3c567b7d 2096 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2097 * @ap: Port to set SATA spd for
2098 *
2099 * Set SATA spd of @ap according to sata_spd_limit.
2100 *
2101 * LOCKING:
2102 * Inherited from caller.
2103 *
2104 * RETURNS:
2105 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2106 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2107 */
3c567b7d 2108int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2109{
2110 u32 scontrol;
81952c54 2111 int rc;
1c3fae4d 2112
81952c54
TH
2113 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2114 return rc;
1c3fae4d 2115
3c567b7d 2116 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2117 return 0;
2118
81952c54
TH
2119 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2120 return rc;
2121
1c3fae4d
TH
2122 return 1;
2123}
2124
452503f9
AC
2125/*
2126 * This mode timing computation functionality is ported over from
2127 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2128 */
2129/*
b352e57d 2130 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2131 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2132 * for UDMA6, which is currently supported only by Maxtor drives.
2133 *
2134 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2135 */
2136
2137static const struct ata_timing ata_timing[] = {
2138
2139 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2140 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2141 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2142 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2143
b352e57d
AC
2144 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2145 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2146 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2147 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2148 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2149
2150/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2151
452503f9
AC
2152 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2153 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2154 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2155
452503f9
AC
2156 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2157 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2158 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2159
b352e57d
AC
2160 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2161 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2162 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2163 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2164
2165 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2166 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2167 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2168
2169/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2170
2171 { 0xFF }
2172};
2173
2174#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2175#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2176
/*
 * Convert timings in @t from nanoseconds to bus clock counts in @q,
 * rounding up.  @T is the bus clock period for PIO/MWDMA fields and
 * @UT the period for the UDMA field, both in picoseconds (hence the
 * * 1000 on the nanosecond values).  @q may alias @t.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2188
/*
 * Merge two timing sets by taking, for each field selected by the
 * ATA_TIMING_* bits in @what, the slower (larger) of @a's and @b's
 * value into @m.  @m may alias @a or @b; unselected fields of @m are
 * left untouched.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2201
2202static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2203{
2204 const struct ata_timing *t;
2205
2206 for (t = ata_timing; t->mode != speed; t++)
91190758 2207 if (t->mode == 0xFF)
452503f9 2208 return NULL;
2e9edbf8 2209 return t;
452503f9
AC
2210}
2211
/*
 * Compute the bus-clock timing for @adev at transfer mode @speed into
 * @t.  @T and @UT are the PIO/MWDMA and UDMA bus clock periods (see
 * ata_timing_quantize()).  Returns 0 on success, -EINVAL if @speed has
 * no entry in the timing table.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* word 67 is PIO w/o IORDY, word 68 w/ IORDY */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* only lengthen cycle times; never shorten below the table */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse for the device's current PIO mode and merge */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
2276
cf176e1a
TH
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@force_pio0: Force PIO0
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
{
	unsigned long xfer_mask;
	int highbit;

	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
				      dev->udma_mask);

	if (!xfer_mask)
		goto fail;
	/* don't gear down to MWDMA from UDMA, go directly to PIO */
	if (xfer_mask & ATA_MASK_UDMA)
		xfer_mask &= ~ATA_MASK_MWDMA;

	/* knock off the current top speed */
	highbit = fls(xfer_mask) - 1;
	xfer_mask &= ~(1 << highbit);
	if (force_pio0)
		xfer_mask &= 1 << ATA_SHIFT_PIO;
	if (!xfer_mask)
		goto fail;

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
		       ata_mode_string(xfer_mask));

	return 0;

 fail:
	/* nothing left to fall back to */
	return -EINVAL;
}
2324
/*
 * Program the transfer mode already chosen in @dev->xfer_mode /
 * @dev->xfer_shift into the device via SET FEATURES - XFER and
 * revalidate it.  Returns 0 on success, -EIO if the device rejects
 * the mode, or the revalidation error.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	/* keep the PIO flag in sync with the selected shift */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* flag the revalidation so EH knows it follows a mode change */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2355
1da177e4
LT
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* pick the fastest mode from each class of mask */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = 1;

	/* step5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	if (rc)
		*r_failed_dev = dev;	/* dev points at the failing device */
	return rc;
}
2462
1fdffbce
JG
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load registers first, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
2482
1da177e4
LT
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies) before warning
 *	@tmout: overall timeout (jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.  A status of 0xff is treated as
 *	"no device present" (floating bus).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy after the impatience timeout: warn but keep waiting */
	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
2537
/*
 * Post-softreset settling: wait for the devices indicated by @devmask
 * to come out of BSY and (for device 1) to respond to register access,
 * then re-run the device selection dance.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		/* signature values written by the device post-reset */
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
2579
1da177e4
LT
/*
 * Perform an ATA software reset (SRST pulse on the control register)
 * and wait for the devices in @devmask to settle.
 *
 * NOTE: as written this always returns 0 (including the floating-bus
 * 0xFF case); callers that test the return value currently never see
 * a non-zero result.
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset. causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return 0;

	ata_bus_post_reset(ap, devmask);

	return 0;
}
2617
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 present */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 means device 1 failed diagnostics */
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2704
d7bb4cc7
TH
/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 is only accepted once timeout expires */
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
2763
/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	/* keep SPD limit, set DET=0 (no action), IPM=3 (no power mgmt) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}
2797
f5914a46
TH
/*
 * Give a hotplugged device time to spin up before talking to it.
 * For SATA links the wait is skipped when the link debounces to
 * offline; otherwise sleeps until ATA_SPINUP_WAIT msec have elapsed
 * since the hotplug event recorded in the EH context.
 */
static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	secs = ((end - jiffies) + HZ - 1) / HZ;

	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}
2826
/**
 *	ata_std_prereset - prepare for reset
 *	@ap: ATA port to be reset
 *
 *	@ap is about to be reset.  Initialize it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume & hotplug spinup */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing);
		/* -EOPNOTSUPP (no SCR access) is not fatal for softreset */
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}
2877
c2bd5804
TH
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	/* link offline: no device to reset or classify */
	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	/* err == 0x81 means device 1 failed diagnostics */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
2931
/**
 *	sata_port_hardreset - reset port via SATA phy reset
 *	@ap: port to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET=4: disable the SATA phy */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET=1: perform interface communication initialization (COMRESET) */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
2989
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		/* offline after COMRESET means nothing attached - success */
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3039
/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError (write-1-to-clear register) */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3087
623a3128
TH
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
	u64 new_n_sectors;

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	/* extract printable model/serial strings from both ID pages */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	/* capacity check only makes sense for ATA disks */
	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)dev->n_sectors,
			       (unsigned long long)new_n_sectors);
		return 0;
	}

	return 1;
}
3146
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* use the port's scratch sector buffer so no allocation is needed */
	u16 *id = (void *)dev->ap->sector_buf;
	int rc;

	if (!ata_dev_enabled(dev)) {
		rc = -ENODEV;
		goto fail;
	}

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	/* commit the new ID page only after it has been validated */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc == 0)
		return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3194
6919a0a6
AC
3195struct ata_blacklist_entry {
3196 const char *model_num;
3197 const char *model_rev;
3198 unsigned long horkage;
3199};
3200
3201static const struct ata_blacklist_entry ata_device_blacklist [] = {
3202 /* Devices with DMA related problems under Linux */
3203 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3204 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3205 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3206 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3207 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3208 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3209 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3210 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3211 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3212 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3213 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3214 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3215 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3216 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3217 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3218 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3219 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3220 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3221 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3222 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3223 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3224 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3225 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3226 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3227 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3228 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3229 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3230 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3231 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3232 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3233
3234 /* Devices we expect to fail diagnostics */
3235
3236 /* Devices where NCQ should be avoided */
3237 /* NCQ is slow */
3238 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3239
3240 /* Devices with NCQ limits */
3241
3242 /* End Marker */
3243 { }
1da177e4 3244};
2e9edbf8 3245
6919a0a6 3246unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3247{
8bfa79fc
TH
3248 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3249 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3250 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3251
8bfa79fc
TH
3252 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3253 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3254
6919a0a6 3255 while (ad->model_num) {
8bfa79fc 3256 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3257 if (ad->model_rev == NULL)
3258 return ad->horkage;
8bfa79fc 3259 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3260 return ad->horkage;
f4b15fef 3261 }
6919a0a6 3262 ad++;
f4b15fef 3263 }
1da177e4
LT
3264 return 0;
3265}
3266
6919a0a6
AC
3267static int ata_dma_blacklisted(const struct ata_device *dev)
3268{
3269 /* We don't support polling DMA.
3270 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3271 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3272 */
3273 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3274 (dev->flags & ATA_DFLAG_CDB_INTR))
3275 return 1;
3276 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3277}
3278
a6d5a51c
TH
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
	/* Apply drive side cable rule. Unknown or 80 pin cables reported
	 * host side are checked drive side as well. Cases where we know a
	 * 40wire cable is used safely for 80 are not checked here.
	 */
	if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);


	/* intersect with what the device itself advertises */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	/* quirky devices lose all DMA modes and fall back to PIO */
	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one device may own DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	/* give the LLDD a final veto over the computed modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
3347
1da177e4
LT
3348/**
3349 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3350 * @dev: Device to which command will be sent
3351 *
780a87f7
JG
3352 * Issue SET FEATURES - XFER MODE command to device @dev
3353 * on port @ap.
3354 *
1da177e4 3355 * LOCKING:
0cba632b 3356 * PCI/etc. bus probe sem.
83206a29
TH
3357 *
3358 * RETURNS:
3359 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3360 */
3361
3373efd8 3362static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3363{
a0123703 3364 struct ata_taskfile tf;
83206a29 3365 unsigned int err_mask;
1da177e4
LT
3366
3367 /* set up set-features taskfile */
3368 DPRINTK("set features - xfer mode\n");
3369
3373efd8 3370 ata_tf_init(dev, &tf);
a0123703
TH
3371 tf.command = ATA_CMD_SET_FEATURES;
3372 tf.feature = SETFEATURES_XFER;
3373 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3374 tf.protocol = ATA_PROT_NODATA;
3375 tf.nsect = dev->xfer_mode;
1da177e4 3376
3373efd8 3377 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3378
83206a29
TH
3379 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3380 return err_mask;
1da177e4
LT
3381}
3382
8bf62ece
AL
3383/**
3384 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3385 * @dev: Device to which command will be sent
e2a7f77a
RD
3386 * @heads: Number of heads (taskfile parameter)
3387 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3388 *
3389 * LOCKING:
6aff8f1f
TH
3390 * Kernel thread context (may sleep)
3391 *
3392 * RETURNS:
3393 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3394 */
3373efd8
TH
3395static unsigned int ata_dev_init_params(struct ata_device *dev,
3396 u16 heads, u16 sectors)
8bf62ece 3397{
a0123703 3398 struct ata_taskfile tf;
6aff8f1f 3399 unsigned int err_mask;
8bf62ece
AL
3400
3401 /* Number of sectors per track 1-255. Number of heads 1-16 */
3402 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3403 return AC_ERR_INVALID;
8bf62ece
AL
3404
3405 /* set up init dev params taskfile */
3406 DPRINTK("init dev params \n");
3407
3373efd8 3408 ata_tf_init(dev, &tf);
a0123703
TH
3409 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3410 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3411 tf.protocol = ATA_PROT_NODATA;
3412 tf.nsect = sectors;
3413 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3414
3373efd8 3415 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3416
6aff8f1f
TH
3417 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3418 return err_mask;
8bf62ece
AL
3419}
3420
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* scatter-gather path: n_elem may be 0 when the whole
		 * transfer was absorbed by the pad buffer */
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg (length was trimmed in ata_sg_setup) */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		/* single-buffer path set up by ata_sg_setup_one() */
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
3478
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg element into PRD entries that never
		 * cross a 64K physical boundary */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry so the controller stops there */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
3530/**
3531 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3532 * @qc: Metadata associated with taskfile to check
3533 *
780a87f7
JG
3534 * Allow low-level driver to filter ATA PACKET commands, returning
3535 * a status indicating whether or not it is OK to use DMA for the
3536 * supplied PACKET command.
3537 *
1da177e4 3538 * LOCKING:
cca3974e 3539 * spin_lock_irqsave(host lock)
0cba632b 3540 *
1da177e4
LT
3541 * RETURNS: 0 when ATAPI DMA can be used
3542 * nonzero otherwise
3543 */
3544int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3545{
3546 struct ata_port *ap = qc->ap;
3547 int rc = 0; /* Assume ATAPI DMA is OK by default */
3548
3549 if (ap->ops->check_atapi_dma)
3550 rc = ap->ops->check_atapi_dma(qc);
3551
3552 return rc;
3553}
3554/**
3555 * ata_qc_prep - Prepare taskfile for submission
3556 * @qc: Metadata associated with taskfile to be prepared
3557 *
780a87f7
JG
3558 * Prepare ATA taskfile for submission.
3559 *
1da177e4 3560 * LOCKING:
cca3974e 3561 * spin_lock_irqsave(host lock)
1da177e4
LT
3562 */
3563void ata_qc_prep(struct ata_queued_cmd *qc)
3564{
3565 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3566 return;
3567
3568 ata_fill_sg(qc);
3569}
3570
e46834cd
BK
/* no-op ->qc_prep for controllers that need no PRD/preparation step */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3572
0cba632b
JG
3573/**
3574 * ata_sg_init_one - Associate command with memory buffer
3575 * @qc: Command to be associated
3576 * @buf: Memory buffer
3577 * @buflen: Length of memory buffer, in bytes.
3578 *
3579 * Initialize the data-related elements of queued_cmd @qc
3580 * to point to a single memory buffer, @buf of byte length @buflen.
3581 *
3582 * LOCKING:
cca3974e 3583 * spin_lock_irqsave(host lock)
0cba632b
JG
3584 */
3585
1da177e4
LT
3586void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3587{
1da177e4
LT
3588 qc->flags |= ATA_QCFLAG_SINGLE;
3589
cedc9a47 3590 qc->__sg = &qc->sgent;
1da177e4 3591 qc->n_elem = 1;
cedc9a47 3592 qc->orig_n_elem = 1;
1da177e4 3593 qc->buf_virt = buf;
233277ca 3594 qc->nbytes = buflen;
1da177e4 3595
61c0596c 3596 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3597}
3598
0cba632b
JG
3599/**
3600 * ata_sg_init - Associate command with scatter-gather table.
3601 * @qc: Command to be associated
3602 * @sg: Scatter-gather table.
3603 * @n_elem: Number of elements in s/g table.
3604 *
3605 * Initialize the data-related elements of queued_cmd @qc
3606 * to point to a scatter-gather table @sg, containing @n_elem
3607 * elements.
3608 *
3609 * LOCKING:
cca3974e 3610 * spin_lock_irqsave(host lock)
0cba632b
JG
3611 */
3612
1da177e4
LT
3613void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3614 unsigned int n_elem)
3615{
3616 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3617 qc->__sg = sg;
1da177e4 3618 qc->n_elem = n_elem;
cedc9a47 3619 qc->orig_n_elem = n_elem;
1da177e4
LT
3620}
3621
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, stage the tail bytes into the pad buffer;
		 * for reads, ata_sg_clean() copies them back afterwards */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* buffer fully absorbed by the pad slot - nothing left to map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
3690
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for writes, copy the tail bytes into the pad buffer now */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the last element if padding fully absorbed it */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
3775
0baab86b 3776/**
c893a3ae 3777 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3778 * @buf: Buffer to swap
3779 * @buf_words: Number of 16-bit words in buffer.
3780 *
3781 * Swap halves of 16-bit words if needed to convert from
3782 * little-endian byte order to native cpu byte order, or
3783 * vice-versa.
3784 *
3785 * LOCKING:
6f0ef4fa 3786 * Inherited from caller.
0baab86b 3787 */
1da177e4
LT
3788void swap_buf_le16(u16 *buf, unsigned int buf_words)
3789{
3790#ifdef __BIG_ENDIAN
3791 unsigned int i;
3792
3793 for (i = 0; i < buf_words; i++)
3794 buf[i] = le16_to_cpu(buf[i]);
3795#endif /* __BIG_ENDIAN */
3796}
3797
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any, through a 16-bit bounce
	 * word so only whole words hit the data register. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3836
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_flags;

	/* same as ata_data_xfer(), but with local interrupts masked
	 * for the duration of the transfer */
	local_irq_save(irq_flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_flags);
}
3858
3859
6ae4cfb5
AL
/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command on going
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command -> advance HSM to LAST */
	if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
	}

	/* book-keeping: advance within the current sg element */
	qc->curbytes += ATA_SECT_SIZE;
	qc->cursg_ofs += ATA_SECT_SIZE;

	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 3916
07f6f7d0
AL
3917/**
3918 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3919 * @qc: Command on going
3920 *
c81e29b4 3921 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
3922 * ATA device for the DRQ request.
3923 *
3924 * LOCKING:
3925 * Inherited from caller.
3926 */
1da177e4 3927
07f6f7d0
AL
3928static void ata_pio_sectors(struct ata_queued_cmd *qc)
3929{
3930 if (is_multi_taskfile(&qc->tf)) {
3931 /* READ/WRITE MULTIPLE */
3932 unsigned int nsect;
3933
587005de 3934 WARN_ON(qc->dev->multi_count == 0);
1da177e4 3935
726f0785
TH
3936 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
3937 qc->dev->multi_count);
07f6f7d0
AL
3938 while (nsect--)
3939 ata_pio_sector(qc);
3940 } else
3941 ata_pio_sector(qc);
3942}
3943
c71c1857
AL
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* the next HSM state depends on the ATAPI protocol variant */
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows the CDB */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase; wait for final status */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
3979
6ae4cfb5
AL
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this chunk satisfies (or exceeds) the request -> LAST state */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* drain/pad one word at a time through the bounce word */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	/* book-keeping: advance within (or past) the current sg element */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	/* loop until the device's byte count is satisfied */
	if (bytes)
		goto next_sg;
}
4074
6ae4cfb5
AL
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count the device wants to move, from the LBA mid/high regs */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	/* interrupt reason disagreed with the command - HSM violation */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4124
4125/**
c234fb00
AL
4126 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4127 * @ap: the target ata_port
4128 * @qc: qc on going
1da177e4 4129 *
c234fb00
AL
4130 * RETURNS:
4131 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4132 */
c234fb00
AL
4133
4134static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4135{
c234fb00
AL
4136 if (qc->tf.flags & ATA_TFLAG_POLLING)
4137 return 1;
1da177e4 4138
c234fb00
AL
4139 if (ap->hsm_task_state == HSM_ST_FIRST) {
4140 if (qc->tf.protocol == ATA_PROT_PIO &&
4141 (qc->tf.flags & ATA_TFLAG_WRITE))
4142 return 1;
1da177e4 4143
c234fb00
AL
4144 if (is_atapi_taskfile(&qc->tf) &&
4145 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4146 return 1;
fe79e683
AL
4147 }
4148
c234fb00
AL
4149 return 0;
4150}
1da177e4 4151
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new EH: a qc that tripped an HSM violation freezes the
		 * port instead of completing normally.
		 */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable the port IRQ that was
					 * masked for the polled transfer */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* caller already holds the host lock */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old EH: always complete; no port freezing available */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}

	ata_altstatus(ap); /* flush */
}
4203
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
			       ap->id, status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
			ata_altstatus(ap); /* flush */
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
				       ap->id, status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					ata_altstatus(ap);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				ata_altstatus(ap);
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
4440
/* ata_pio_task - workqueue handler that drives a polled PIO command.
 * Waits (briefly) for BSY to clear, then advances the HSM; re-queues
 * itself with a short pause if the device stays busy.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: back off and let the workqueue
			 * re-run us after ATA_SHORT_PAUSE */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
4478
1da177e4
LT
4479/**
4480 * ata_qc_new - Request an available ATA command, for queueing
4481 * @ap: Port associated with device @dev
4482 * @dev: Device from whom we request an available command structure
4483 *
4484 * LOCKING:
0cba632b 4485 * None.
1da177e4
LT
4486 */
4487
4488static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4489{
4490 struct ata_queued_cmd *qc = NULL;
4491 unsigned int i;
4492
e3180499 4493 /* no command while frozen */
b51e9e5d 4494 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4495 return NULL;
4496
2ab7db1f
TH
4497 /* the last tag is reserved for internal command. */
4498 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4499 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4500 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4501 break;
4502 }
4503
4504 if (qc)
4505 qc->tag = i;
4506
4507 return qc;
4508}
4509
4510/**
4511 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4512 * @dev: Device from whom we request an available command structure
4513 *
4514 * LOCKING:
0cba632b 4515 * None.
1da177e4
LT
4516 */
4517
3373efd8 4518struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4519{
3373efd8 4520 struct ata_port *ap = dev->ap;
1da177e4
LT
4521 struct ata_queued_cmd *qc;
4522
4523 qc = ata_qc_new(ap);
4524 if (qc) {
1da177e4
LT
4525 qc->scsicmd = NULL;
4526 qc->ap = ap;
4527 qc->dev = dev;
1da177e4 4528
2c13b7ce 4529 ata_qc_reinit(qc);
1da177e4
LT
4530 }
4531
4532 return qc;
4533}
4534
1da177e4
LT
4535/**
4536 * ata_qc_free - free unused ata_queued_cmd
4537 * @qc: Command to complete
4538 *
4539 * Designed to free unused ata_queued_cmd object
4540 * in case something prevents using it.
4541 *
4542 * LOCKING:
cca3974e 4543 * spin_lock_irqsave(host lock)
1da177e4
LT
4544 */
4545void ata_qc_free(struct ata_queued_cmd *qc)
4546{
4ba946e9
TH
4547 struct ata_port *ap = qc->ap;
4548 unsigned int tag;
4549
a4631474 4550 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4551
4ba946e9
TH
4552 qc->flags = 0;
4553 tag = qc->tag;
4554 if (likely(ata_tag_valid(tag))) {
4ba946e9 4555 qc->tag = ATA_TAG_POISON;
6cec4a39 4556 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4557 }
1da177e4
LT
4558}
4559
/* __ata_qc_complete - low-level qc completion: unmap DMA, clear the
 * command's active-state bookkeeping, then invoke the completion
 * callback.  Caller holds the host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4586
39599a53
TH
4587static void fill_result_tf(struct ata_queued_cmd *qc)
4588{
4589 struct ata_port *ap = qc->ap;
4590
4591 ap->ops->tf_read(ap, &qc->result_tf);
4592 qc->result_tf.flags = qc->tf.flags;
4593}
4594
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new EH path: failed qcs are handed to the EH thread */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
			/* internal commands fall through and complete
			 * normally; EH is driven by the issuer */
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH path */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4654
dedaf2b0
TH
4655/**
4656 * ata_qc_complete_multiple - Complete multiple qcs successfully
4657 * @ap: port in question
4658 * @qc_active: new qc_active mask
4659 * @finish_qc: LLDD callback invoked before completing a qc
4660 *
4661 * Complete in-flight commands. This functions is meant to be
4662 * called from low-level driver's interrupt routine to complete
4663 * requests normally. ap->qc_active and @qc_active is compared
4664 * and commands are completed accordingly.
4665 *
4666 * LOCKING:
cca3974e 4667 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4668 *
4669 * RETURNS:
4670 * Number of completed commands on success, -errno otherwise.
4671 */
4672int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4673 void (*finish_qc)(struct ata_queued_cmd *))
4674{
4675 int nr_done = 0;
4676 u32 done_mask;
4677 int i;
4678
4679 done_mask = ap->qc_active ^ qc_active;
4680
4681 if (unlikely(done_mask & qc_active)) {
4682 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4683 "(%08x->%08x)\n", ap->qc_active, qc_active);
4684 return -EINVAL;
4685 }
4686
4687 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4688 struct ata_queued_cmd *qc;
4689
4690 if (!(done_mask & (1 << i)))
4691 continue;
4692
4693 if ((qc = ata_qc_from_tag(ap, i))) {
4694 if (finish_qc)
4695 finish_qc(qc);
4696 ata_qc_complete(qc);
4697 nr_done++;
4698 }
4699 }
4700
4701 return nr_done;
4702}
4703
1da177e4
LT
4704static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4705{
4706 struct ata_port *ap = qc->ap;
4707
4708 switch (qc->tf.protocol) {
3dc1d881 4709 case ATA_PROT_NCQ:
1da177e4
LT
4710 case ATA_PROT_DMA:
4711 case ATA_PROT_ATAPI_DMA:
4712 return 1;
4713
4714 case ATA_PROT_ATAPI:
4715 case ATA_PROT_PIO:
1da177e4
LT
4716 if (ap->flags & ATA_FLAG_PIO_DMA)
4717 return 1;
4718
4719 /* fall through */
4720
4721 default:
4722 return 0;
4723 }
4724
4725 /* never reached */
4726}
4727
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Make sure only one non-NCQ command is outstanding. The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));

	/* record the command as active: NCQ commands go into sactive,
	 * everything else owns active_tag exclusively */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(ap->sactive & (1 << qc->tag));
		ap->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(ap->sactive);
		ap->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* map data buffers for DMA-capable protocols */
	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G setup failed: nothing was mapped */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* complete with the error mask set so EH sees the failure */
	ata_qc_complete(qc);
}
4786
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command. ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* Some controllers show flaky interrupt behavior after
	 * setting xfer mode. Use polling instead.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
		     qc->tf.feature == SETFEATURES_XFER) &&
	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		/* non-data: write TF, then just wait for command done */
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		/* DMA never runs polled */
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
4926
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command. Currently,
 *	only DMA interrupts are handled. All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus: reading it does not clear INTRQ */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if the DMA transfer failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5026
5027/**
5028 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5029 * @irq: irq line (unused)
cca3974e 5030 * @dev_instance: pointer to our ata_host information structure
1da177e4 5031 *
0cba632b
JG
5032 * Default interrupt handler for PCI IDE devices. Calls
5033 * ata_host_intr() for each port that is not disabled.
5034 *
1da177e4 5035 * LOCKING:
cca3974e 5036 * Obtains host lock during operation.
1da177e4
LT
5037 *
5038 * RETURNS:
0cba632b 5039 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5040 */
5041
7d12e780 5042irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5043{
cca3974e 5044 struct ata_host *host = dev_instance;
1da177e4
LT
5045 unsigned int i;
5046 unsigned int handled = 0;
5047 unsigned long flags;
5048
5049 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5050 spin_lock_irqsave(&host->lock, flags);
1da177e4 5051
cca3974e 5052 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5053 struct ata_port *ap;
5054
cca3974e 5055 ap = host->ports[i];
c1389503 5056 if (ap &&
029f5468 5057 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5058 struct ata_queued_cmd *qc;
5059
5060 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5061 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5062 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5063 handled |= ata_host_intr(ap, qc);
5064 }
5065 }
5066
cca3974e 5067 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5068
5069 return IRQ_RETVAL(handled);
5070}
5071
34bf2170
TH
5072/**
5073 * sata_scr_valid - test whether SCRs are accessible
5074 * @ap: ATA port to test SCR accessibility for
5075 *
5076 * Test whether SCRs are accessible for @ap.
5077 *
5078 * LOCKING:
5079 * None.
5080 *
5081 * RETURNS:
5082 * 1 if SCRs are accessible, 0 otherwise.
5083 */
5084int sata_scr_valid(struct ata_port *ap)
5085{
5086 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5087}
5088
/**
 *	sata_scr_read - read SCR register of the specified port
 *	@ap: ATA port to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @ap into *@val.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
{
	if (sata_scr_valid(ap)) {
		*val = ap->ops->scr_read(ap, reg);
		return 0;
	}
	/* no SCR access on this port (PATA or missing ->scr_read) */
	return -EOPNOTSUPP;
}
5113
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @ap.  This function is
 *	guaranteed to succeed if sata_scr_valid() is true for @ap,
 *	i.e. the cable type is SATA and ->scr_read is implemented
 *	(the write itself goes through ->scr_write).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		return 0;
	}
	return -EOPNOTSUPP;
}
5138
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.  The
 *	flush is done by reading the same SCR back, forcing the write
 *	to be posted to the device.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		ap->ops->scr_read(ap, reg);	/* read back to flush the write */
		return 0;
	}
	return -EOPNOTSUPP;
}
5163
5164/**
5165 * ata_port_online - test whether the given port is online
5166 * @ap: ATA port to test
5167 *
5168 * Test whether @ap is online. Note that this function returns 0
5169 * if online status of @ap cannot be obtained, so
5170 * ata_port_online(ap) != !ata_port_offline(ap).
5171 *
5172 * LOCKING:
5173 * None.
5174 *
5175 * RETURNS:
5176 * 1 if the port online status is available and online.
5177 */
5178int ata_port_online(struct ata_port *ap)
5179{
5180 u32 sstatus;
5181
5182 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5183 return 1;
5184 return 0;
5185}
5186
5187/**
5188 * ata_port_offline - test whether the given port is offline
5189 * @ap: ATA port to test
5190 *
5191 * Test whether @ap is offline. Note that this function returns
5192 * 0 if offline status of @ap cannot be obtained, so
5193 * ata_port_online(ap) != !ata_port_offline(ap).
5194 *
5195 * LOCKING:
5196 * None.
5197 *
5198 * RETURNS:
5199 * 1 if the port offline status is available and offline.
5200 */
5201int ata_port_offline(struct ata_port *ap)
5202{
5203 u32 sstatus;
5204
5205 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5206 return 1;
5207 return 0;
5208}
0baab86b 5209
/**
 *	ata_flush_cache - flush device write cache
 *	@dev: device whose cache is to be flushed
 *
 *	Issue FLUSH CACHE (EXT) to @dev if the device advertises a
 *	flushable write cache.  A no-op for devices where
 *	ata_try_flush_cache() says flushing is unnecessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep); issues a simple command.
 *
 *	RETURNS:
 *	0 on success or when no flush is needed, -EIO on failure.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	/* use the 48-bit variant when the device supports it */
	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
5231
cca3974e
JG
/*
 * ata_host_request_pm - hand a PM operation to EH for every port of @host
 * @host: host to operate on
 * @mesg: PM message (suspend/resume target state)
 * @action: EH actions to schedule (e.g. ATA_EH_SOFTRESET)
 * @ehi_flags: EH info flags to set (e.g. ATA_EHI_QUIET)
 * @wait: non-zero to wait for EH completion and collect its result
 *
 * For each port: wait out any in-flight PM request, then mark
 * PM_PENDING, record @mesg, schedule EH and (optionally) block until
 * EH finishes.  Returns 0 on success or the first port's EH result
 * (only meaningful when @wait is set).
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;	/* abort on first port failure */
		}
	}

	return 0;
}
5278
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	/* wait==1: block until every port's EH has suspended it */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	/* undo any partial suspension before reporting the error */
	ata_host_resume(host);
	return rc;
}
5329
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely
 *	(wait==0: the ports resume concurrently, no completion wait).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
5347
c893a3ae
RD
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table (managed via
 *	devres, so no explicit free path is needed here).
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on allocation failure.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* device-managed coherent DMA buffer for the PRD table */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
5378
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.  Clears the
 *	probe-relevant portion of the structure while preserving the
 *	persistent fields below ATA_DEVICE_CLEAR_OFFSET, and resets
 *	the transfer-mode masks to "anything goes".
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe everything past the persistent prefix of the struct */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* UINT_MAX == no restriction; probing narrows these masks */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5410
/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;	/* stays disabled until probed */
	ap->id = ata_unique_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	/* pinfo2, when present, overrides settings for the second port
	 * (legacy dual-channel controllers may differ per channel)
	 */
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;	/* narrowed later from SControl */
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	/* port_task's handler is filled in when work is queued */
	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
5486
/**
 *	ata_port_init_shost - Initialize SCSI host associated with ATA port
 *	@ap: ATA port to initialize SCSI host for
 *	@shost: SCSI host associated with @ap
 *
 *	Initialize SCSI host @shost associated with ATA port @ap.
 *	Links the two objects and sets the SCSI host's addressing
 *	limits to values suitable for an ATA channel.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
	ap->scsi_host = shost;

	shost->unique_id = ap->id;
	shost->max_id = 16;
	shost->max_lun = 1;
	shost->max_channel = 1;
	shost->max_cmd_len = 12;	/* longest CDB libata translates */
}
5507
/**
 *	ata_port_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.  Allocates a SCSI host
 *	with an embedded ata_port and initializes both.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, NULL on error.
 */
static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
				      struct ata_host *host,
				      unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	/* refuse drivers that provide neither new-style EH nor any
	 * legacy reset mechanism
	 */
	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	/* ata_port lives in the SCSI host's hostdata area */
	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}
5551
f0d36efd
TH
/*
 * ata_host_release - devres release callback for an ata_host
 * @gendev: device the host was registered against
 * @res: devres payload (unused; host is fetched from drvdata)
 *
 * Stops every port (via ->port_stop), drops the SCSI host
 * references taken at allocation time, then stops the host itself.
 * Runs automatically when the owning device's resources are released.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* slots can be NULL if ata_device_add() failed mid-way */
		if (!ap)
			continue;

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);

		scsi_host_put(ap->scsi_host);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
5572
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	Basic field initialization; allocation is the caller's job.
 *	(Also used by SAS glue code, hence the exported symbol.)
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */

void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
5593
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	All resources acquired before the devres group is closed are
 *	device-managed and released automatically on error or detach.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	int rc;

	DPRINTK("ENTER\n");

	if (ent->irq == 0) {
		dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
		return 0;
	}

	/* open a devres group so a single release undoes everything below */
	if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
		return 0;

	/* alloc a container for our list of ATA ports (buses) */
	host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
			    (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		goto err_out;
	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->iomap = ent->iomap;
	host->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		host->ports[i] = ap;
		if (!ap)
			goto err_out;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			/* NULL the slot so ata_host_release() skips it */
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
				"ctl 0x%p bmdma 0x%p irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		/* freeze port before requesting IRQ */
		ata_eh_freeze_port(ap);
	}

	/* obtain irq, that may be shared between channels */
	rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
			      ent->irq_flags, DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = devm_request_irq(dev, ent->irq2,
				      ent->port_ops->irq_handler, ent->irq_flags,
				      DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out;
		}
	}

	/* resource acquisition complete */
	devres_remove_group(dev, ata_device_add);

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;	/* shadows the outer rc within this loop body */

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			/* new-style EH: hand boot probing to the EH thread */
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* legacy path: probe synchronously in this context */
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports;	/* success */

 err_out:
	devres_release_group(dev, ata_device_add);
	dev_set_drvdata(dev, NULL);
	/* NOTE(review): error paths taken before the first devm_request_irq()
	 * reach here with the outer rc uninitialized, so this debug print may
	 * show a garbage value -- debug-only, but worth confirming upstream */
	VPRINTK("EXIT, returning %d\n", rc);
	return 0;
}
5797
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* legacy (non-EH) drivers have nothing to quiesce */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 * (flush - cancel - flush closes the race with a task that was
	 * already running when cancel_delayed_work() was called)
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
5855
0529c159
TH
/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}
5872
f6d950e2
BK
/*
 * ata_probe_ent_alloc - allocate and pre-fill an ata_probe_ent
 * @dev: device the probe ent is for
 * @port: port info template to copy masks/ops/flags from
 *
 * Allocation is device-managed (devm) when @dev already participates
 * in devres, plain kzalloc otherwise.  Returns the new probe ent or
 * NULL on allocation failure (caller frees only in the non-devres case).
 */
struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	/* XXX - the following if can go away once all LLDs are managed */
	if (!list_empty(&dev->devres_head))
		probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
	else
		probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;
	probe_ent->private_data = port->private_data;

	return probe_ent;
}
5902
1da177e4
LT
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	/* standard taskfile register layout, offset from the command block */
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
5928
0baab86b 5929
374b1873
JG
5930#ifdef CONFIG_PCI
5931
1da177e4
LT
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
5950
5951/* move to PCI subsystem */
057ace5e 5952int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5953{
5954 unsigned long tmp = 0;
5955
5956 switch (bits->width) {
5957 case 1: {
5958 u8 tmp8 = 0;
5959 pci_read_config_byte(pdev, bits->reg, &tmp8);
5960 tmp = tmp8;
5961 break;
5962 }
5963 case 2: {
5964 u16 tmp16 = 0;
5965 pci_read_config_word(pdev, bits->reg, &tmp16);
5966 tmp = tmp16;
5967 break;
5968 }
5969 case 4: {
5970 u32 tmp32 = 0;
5971 pci_read_config_dword(pdev, bits->reg, &tmp32);
5972 tmp = tmp32;
5973 break;
5974 }
5975
5976 default:
5977 return -EINVAL;
5978 }
5979
5980 tmp &= bits->mask;
5981
5982 return (tmp == bits->val) ? 1 : 0;
5983}
9b847548 5984
/*
 * ata_pci_device_do_suspend - PCI-side half of libata suspend
 * @pdev: PCI device to suspend
 * @mesg: PM message; only PM_EVENT_SUSPEND powers the device down
 *
 * Saves PCI config space unconditionally; disables the device and
 * drops it to D3hot only for a full suspend (not e.g. freeze).
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);

	if (mesg.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
5994
/*
 * ata_pci_device_do_resume - PCI-side half of libata resume
 * @pdev: PCI device to resume
 *
 * Powers the device back to D0, restores saved config space and
 * re-enables it (managed, so devres tracks the enable).
 *
 * RETURNS:
 * 0 on success, negative errno if the device could not be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6012
/*
 * ata_pci_device_suspend - standard PCI driver .suspend callback
 * @pdev: PCI device being suspended
 * @mesg: PM message
 *
 * Suspends the ATA host first (EH quiesces the ports); only on
 * success is the PCI device itself powered down.
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
6026
/*
 * ata_pci_device_resume - standard PCI driver .resume callback
 * @pdev: PCI device being resumed
 *
 * Brings the PCI device back up first; the ATA host is resumed
 * only if the device could be re-enabled.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
1da177e4
LT
6037#endif /* CONFIG_PCI */
6038
6039
1da177e4
LT
/*
 * ata_init - module init: scale timeouts and create the workqueues
 *
 * Creates the per-CPU "ata" workqueue used for PIO tasks and the
 * single-threaded "ata_aux" workqueue used for hotplug/EH helpers.
 */
static int __init ata_init(void)
{
	/* module parameter is in seconds; convert to jiffies once */
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		/* unwind the first workqueue on partial failure */
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}
6056
/* ata_exit - module exit: tear down both libata workqueues */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6062
a4625085 6063subsys_initcall(ata_init);
1da177e4
LT
6064module_exit(ata_exit);
6065
67846b30 6066static unsigned long ratelimit_time;
34af946a 6067static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6068
6069int ata_ratelimit(void)
6070{
6071 int rc;
6072 unsigned long flags;
6073
6074 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6075
6076 if (time_after(jiffies, ratelimit_time)) {
6077 rc = 1;
6078 ratelimit_time = jiffies + (HZ/5);
6079 } else
6080 rc = 0;
6081
6082 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6083
6084 return rc;
6085}
6086
c22daff4
TH
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
6132
dd5b06c4
TH
/*
 * Dummy port_ops
 *
 * Installed on ports that exist in hardware numbering but carry no
 * usable channel (see dummy_port_mask handling in ata_device_add()).
 * Every op is a no-op; qc_issue fails any command with AC_ERR_SYSTEM
 * and check_status always reports the device as ready.
 */
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)	{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
6165
1da177e4
LT
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* SATA debounce timing tables and the dummy port ops defined above */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
/* host/device registration and teardown */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_detach);
/* scatter/gather setup and command completion/issue */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
/* taskfile and device-selection helpers */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/* BMDMA (bus-master DMA) helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
/* probing, link speed and reset sequences */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
/* device classification and misc utilities */
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
/* SCSI midlayer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
/* SATA SCR register access and link-state queries */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
/* host power management */
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
/* IDENTIFY-data string helpers and device blacklist */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* transfer-mode timing computation */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
/* PCI-specific init/teardown and PM helpers */
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

/* per-device power management */
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

/* error handling entry points */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
/* interrupt acknowledge helpers */
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);