]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/ata/libata-core.c
[libata] SCSI: simulator version, not device version, belongs in VPD
[mirror_ubuntu-eoan-kernel.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* monotonically increasing print ID handed out to ports */
unsigned int ata_print_id = 1;
/* main libata workqueue; ata_aux_wq handles auxiliary/EH-side work */
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

/* module parameters (all read-only at runtime except ignore_hpa) */
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

1da177e4
LT
114/**
115 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
116 * @tf: Taskfile to convert
1da177e4 117 * @pmp: Port multiplier port
9977126c
TH
118 * @is_cmd: This FIS is for command
119 * @fis: Buffer into which data will output
1da177e4
LT
120 *
121 * Converts a standard ATA taskfile to a Serial ATA
122 * FIS structure (Register - Host to Device).
123 *
124 * LOCKING:
125 * Inherited from caller.
126 */
9977126c 127void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 128{
9977126c
TH
129 fis[0] = 0x27; /* Register - Host to Device FIS */
130 fis[1] = pmp & 0xf; /* Port multiplier number*/
131 if (is_cmd)
132 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
133
1da177e4
LT
134 fis[2] = tf->command;
135 fis[3] = tf->feature;
136
137 fis[4] = tf->lbal;
138 fis[5] = tf->lbam;
139 fis[6] = tf->lbah;
140 fis[7] = tf->device;
141
142 fis[8] = tf->hob_lbal;
143 fis[9] = tf->hob_lbam;
144 fis[10] = tf->hob_lbah;
145 fis[11] = tf->hob_feature;
146
147 fis[12] = tf->nsect;
148 fis[13] = tf->hob_nsect;
149 fis[14] = 0;
150 fis[15] = tf->ctl;
151
152 fis[16] = 0;
153 fis[17] = 0;
154 fis[18] = 0;
155 fis[19] = 0;
156}
157
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	/* In a Device-to-Host register FIS, bytes 2/3 carry the status
	 * and error registers, which land in ->command/->feature here.
	 */
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
186
8cbd6df1
AL
/* Read/write opcode lookup used by ata_rwcmd_protocol().  Indexed by
 * base + fua*4 + lba48*2 + write, where base is 0 (PIO multi-sector),
 * 8 (PIO single) or 16 (DMA).  A zero entry marks an invalid
 * combination (e.g. FUA is write-only and LBA48-only).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
216
217/**
8cbd6df1 218 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
219 * @tf: command to examine and configure
220 * @dev: device tf belongs to
1da177e4 221 *
2e9edbf8 222 * Examine the device configuration and tf->flags to calculate
8cbd6df1 223 * the proper read/write commands and protocol to use.
1da177e4
LT
224 *
225 * LOCKING:
226 * caller.
227 */
bd056d7e 228static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 229{
9a3dccc4 230 u8 cmd;
1da177e4 231
9a3dccc4 232 int index, fua, lba48, write;
2e9edbf8 233
9a3dccc4 234 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
235 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
236 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 237
8cbd6df1
AL
238 if (dev->flags & ATA_DFLAG_PIO) {
239 tf->protocol = ATA_PROT_PIO;
9a3dccc4 240 index = dev->multi_count ? 0 : 8;
9af5c9c9 241 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
242 /* Unable to use DMA due to host limitation */
243 tf->protocol = ATA_PROT_PIO;
0565c26d 244 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
245 } else {
246 tf->protocol = ATA_PROT_DMA;
9a3dccc4 247 index = 16;
8cbd6df1 248 }
1da177e4 249
9a3dccc4
TH
250 cmd = ata_rw_cmds[index + fua + lba48 + write];
251 if (cmd) {
252 tf->command = cmd;
253 return 0;
254 }
255 return -1;
1da177e4
LT
256}
257
35b649fe
TH
258/**
259 * ata_tf_read_block - Read block address from ATA taskfile
260 * @tf: ATA taskfile of interest
261 * @dev: ATA device @tf belongs to
262 *
263 * LOCKING:
264 * None.
265 *
266 * Read block address from @tf. This function can handle all
267 * three address formats - LBA, LBA48 and CHS. tf->protocol and
268 * flags select the address format to use.
269 *
270 * RETURNS:
271 * Block address read from @tf.
272 */
273u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
274{
275 u64 block = 0;
276
277 if (tf->flags & ATA_TFLAG_LBA) {
278 if (tf->flags & ATA_TFLAG_LBA48) {
279 block |= (u64)tf->hob_lbah << 40;
280 block |= (u64)tf->hob_lbam << 32;
281 block |= tf->hob_lbal << 24;
282 } else
283 block |= (tf->device & 0xf) << 24;
284
285 block |= tf->lbah << 16;
286 block |= tf->lbam << 8;
287 block |= tf->lbal;
288 } else {
289 u32 cyl, head, sect;
290
291 cyl = tf->lbam | (tf->lbah << 8);
292 head = tf->device & 0xf;
293 sect = tf->lbal;
294
295 block = (cyl * dev->heads + head) * dev->sectors + sect;
296 }
297
298 return block;
299}
300
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ: tag goes in sector count bits 7:3; the block
		 * count moves to the feature registers.
		 */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
423
cb95d562
TH
424/**
425 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
426 * @pio_mask: pio_mask
427 * @mwdma_mask: mwdma_mask
428 * @udma_mask: udma_mask
429 *
430 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
431 * unsigned int xfer_mask.
432 *
433 * LOCKING:
434 * None.
435 *
436 * RETURNS:
437 * Packed xfer_mask.
438 */
439static unsigned int ata_pack_xfermask(unsigned int pio_mask,
440 unsigned int mwdma_mask,
441 unsigned int udma_mask)
442{
443 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
444 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
445 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
446}
447
c0489e4e
TH
448/**
449 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
450 * @xfer_mask: xfer_mask to unpack
451 * @pio_mask: resulting pio_mask
452 * @mwdma_mask: resulting mwdma_mask
453 * @udma_mask: resulting udma_mask
454 *
455 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
456 * Any NULL distination masks will be ignored.
457 */
458static void ata_unpack_xfermask(unsigned int xfer_mask,
459 unsigned int *pio_mask,
460 unsigned int *mwdma_mask,
461 unsigned int *udma_mask)
462{
463 if (pio_mask)
464 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
465 if (mwdma_mask)
466 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
467 if (udma_mask)
468 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
469}
470
/* Map between the xfer_mask bit ranges and their XFER_* base values;
 * the table is terminated by an entry with shift == -1.  Used by the
 * ata_xfer_* translation helpers below.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and width within xfer_mask */
	u8 base;		/* XFER_* value of the range's lowest mode */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
480
481/**
482 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
483 * @xfer_mask: xfer_mask of interest
484 *
485 * Return matching XFER_* value for @xfer_mask. Only the highest
486 * bit of @xfer_mask is considered.
487 *
488 * LOCKING:
489 * None.
490 *
491 * RETURNS:
492 * Matching XFER_* value, 0 if no match found.
493 */
494static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
495{
496 int highbit = fls(xfer_mask) - 1;
497 const struct ata_xfer_ent *ent;
498
499 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
500 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
501 return ent->base + highbit - ent->shift;
502 return 0;
503}
504
505/**
506 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
507 * @xfer_mode: XFER_* of interest
508 *
509 * Return matching xfer_mask for @xfer_mode.
510 *
511 * LOCKING:
512 * None.
513 *
514 * RETURNS:
515 * Matching xfer_mask, 0 if no match found.
516 */
517static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
518{
519 const struct ata_xfer_ent *ent;
520
521 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
522 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
523 return 1 << (ent->shift + xfer_mode - ent->base);
524 return 0;
525}
526
527/**
528 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
529 * @xfer_mode: XFER_* of interest
530 *
531 * Return matching xfer_shift for @xfer_mode.
532 *
533 * LOCKING:
534 * None.
535 *
536 * RETURNS:
537 * Matching xfer_shift, -1 if no match found.
538 */
539static int ata_xfer_mode2shift(unsigned int xfer_mode)
540{
541 const struct ata_xfer_ent *ent;
542
543 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
544 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
545 return ent->shift;
546 return -1;
547}
548
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @modemask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int bit = fls(xfer_mask) - 1;	/* index of the fastest set mode */

	if (bit < 0 || bit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[bit];
}
594
4c360c81
TH
/* Translate a SATA link speed number (1-based) into a human-readable
 * string; returns "<unknown>" for zero or out-of-range values.
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;	/* spd is 1-based */

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[idx];
}
606
/* Administratively disable @dev: quietly force the transfer mode down
 * to PIO0 and bump ->class so ata_dev_enabled() no longer matches.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		/* QUIET suppresses further messages from the downgrade */
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		/* NOTE(review): incrementing ->class appears to rely on the
		 * ATA_DEV_* ordering placing each "unsupported" class right
		 * after its enabled counterpart — confirm against ata.h.
		 */
		dev->class++;
	}
}
617
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* Bounce complementary patterns off two scratch registers
	 * (sector count and LBA low); only a real device latches them.
	 */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	/* final values a present device must echo back */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
660
1da177e4
LT
661/**
662 * ata_dev_classify - determine device type based on ATA-spec signature
663 * @tf: ATA taskfile register set for device to be identified
664 *
665 * Determine from taskfile register contents whether a device is
666 * ATA or ATAPI, as per "Signature and persistence" section
667 * of ATA/PI spec (volume 1, sect 5.14).
668 *
669 * LOCKING:
670 * None.
671 *
672 * RETURNS:
673 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
674 * the event of failure.
675 */
676
057ace5e 677unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
678{
679 /* Apple's open source Darwin code hints that some devices only
680 * put a proper signature into the LBA mid/high registers,
681 * So, we only check those. It's sufficient for uniqueness.
682 */
683
684 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
685 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
686 DPRINTK("found ATA device by sig\n");
687 return ATA_DEV_ATA;
688 }
689
690 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
691 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
692 DPRINTK("found ATAPI device by sig\n");
693 return ATA_DEV_ATAPI;
694 }
695
696 DPRINTK("unknown device\n");
697 return ATA_DEV_UNKNOWN;
698}
699
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	/* diagnostic result is reported in the (shadow) error register */
	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		/* zero status with an ATA signature: treat as absent */
		class = ATA_DEV_NONE;

	return class;
}
768
769/**
6a62a04d 770 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
771 * @id: IDENTIFY DEVICE results we will examine
772 * @s: string into which data is output
773 * @ofs: offset into identify device page
774 * @len: length of string to return. must be an even number.
775 *
776 * The strings in the IDENTIFY DEVICE page are broken up into
777 * 16-bit chunks. Run through the string, and output each
778 * 8-bit chunk linearly, regardless of platform.
779 *
780 * LOCKING:
781 * caller.
782 */
783
6a62a04d
TH
784void ata_id_string(const u16 *id, unsigned char *s,
785 unsigned int ofs, unsigned int len)
1da177e4
LT
786{
787 unsigned int c;
788
789 while (len > 0) {
790 c = id[ofs] >> 8;
791 *s = c;
792 s++;
793
794 c = id[ofs] & 0xff;
795 *s = c;
796 s++;
797
798 ofs++;
799 len -= 2;
800 }
801}
802
0e949ff3 803/**
6a62a04d 804 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
805 * @id: IDENTIFY DEVICE results we will examine
806 * @s: string into which data is output
807 * @ofs: offset into identify device page
808 * @len: length of string to return. must be an odd number.
809 *
6a62a04d 810 * This function is identical to ata_id_string except that it
0e949ff3
TH
811 * trims trailing spaces and terminates the resulting string with
812 * null. @len must be actual maximum length (even number) + 1.
813 *
814 * LOCKING:
815 * caller.
816 */
6a62a04d
TH
817void ata_id_c_string(const u16 *id, unsigned char *s,
818 unsigned int ofs, unsigned int len)
0e949ff3
TH
819{
820 unsigned char *p;
821
822 WARN_ON(!(len & 1));
823
6a62a04d 824 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
825
826 p = s + strnlen(s, len - 1);
827 while (p > s && p[-1] == ' ')
828 p--;
829 *p = '\0';
830}
0baab86b 831
db6f8759
TH
832static u64 ata_id_n_sectors(const u16 *id)
833{
834 if (ata_id_has_lba(id)) {
835 if (ata_id_has_lba48(id))
836 return ata_id_u64(id, 100);
837 else
838 return ata_id_u32(id, 60);
839 } else {
840 if (ata_id_current_chs_valid(id))
841 return ata_id_u32(id, 57);
842 else
843 return id[1] * id[3] * id[6];
844 }
845}
846
1e999736
AC
847static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
848{
849 u64 sectors = 0;
850
851 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
852 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
853 sectors |= (tf->hob_lbal & 0xff) << 24;
854 sectors |= (tf->lbah & 0xff) << 16;
855 sectors |= (tf->lbam & 0xff) << 8;
856 sectors |= (tf->lbal & 0xff);
857
858 return ++sectors;
859}
860
861static u64 ata_tf_to_lba(struct ata_taskfile *tf)
862{
863 u64 sectors = 0;
864
865 sectors |= (tf->device & 0x0f) << 24;
866 sectors |= (tf->lbah & 0xff) << 16;
867 sectors |= (tf->lbam & 0xff) << 8;
868 sectors |= (tf->lbal & 0xff);
869
870 return ++sectors;
871}
872
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	/* issue synchronously via the internal command path */
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* ABORTED means the device rejected the command itself */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result comes back in the LBA registers of the completed TF */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);

	return 0;
}
921
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the highest addressable sector, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else
		tf.command = ATA_CMD_SET_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* IDNF can mean a prior non-volatile SET MAX denies this one */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
975
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? (only report unless unlocking was requested) */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so dev->id reflects the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1071
10305f0f
AC
1072/**
1073 * ata_id_to_dma_mode - Identify DMA mode from id block
1074 * @dev: device to identify
cc261267 1075 * @unknown: mode to assume if we cannot tell
10305f0f
AC
1076 *
1077 * Set up the timing values for the device based upon the identify
1078 * reported values for the DMA mode. This function is used by drivers
1079 * which rely upon firmware configured modes, but wish to report the
1080 * mode correctly when possible.
1081 *
1082 * In addition we emit similarly formatted messages to the default
1083 * ata_dev_set_mode handler, in order to provide consistency of
1084 * presentation.
1085 */
1086
1087void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1088{
1089 unsigned int mask;
1090 u8 mode;
1091
1092 /* Pack the DMA modes */
1093 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1094 if (dev->id[53] & 0x04)
1095 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1096
1097 /* Select the mode in use */
1098 mode = ata_xfer_mask2mode(mask);
1099
1100 if (mode != 0) {
1101 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1102 ata_mode_string(mask));
1103 } else {
1104 /* SWDMA perhaps ? */
1105 mode = unknown;
1106 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1107 }
1108
1109 /* Configure the device reporting */
1110 dev->xfer_mode = mode;
1111 dev->xfer_shift = ata_xfer_mode2shift(mode);
1112}
1113
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations
 *	for controllers that need no device selection step.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
1129
0baab86b 1130
1da177e4
LT
1131/**
1132 * ata_std_dev_select - Select device 0/1 on ATA bus
1133 * @ap: ATA channel to manipulate
1134 * @device: ATA device (numbered from zero) to select
1135 *
1136 * Use the method defined in the ATA specification to
1137 * make either device 0, or device 1, active on the
0baab86b
EF
1138 * ATA channel. Works with both PIO and MMIO.
1139 *
1140 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1141 *
1142 * LOCKING:
1143 * caller.
1144 */
1145
1146void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1147{
1148 u8 tmp;
1149
1150 if (device == 0)
1151 tmp = ATA_DEVICE_OBS;
1152 else
1153 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1154
0d5ff566 1155 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1156 ata_pause(ap); /* needed; also flushes, for mmio */
1157}
1158
1159/**
1160 * ata_dev_select - Select device 0/1 on ATA bus
1161 * @ap: ATA channel to manipulate
1162 * @device: ATA device (numbered from zero) to select
1163 * @wait: non-zero to wait for Status register BSY bit to clear
1164 * @can_sleep: non-zero if context allows sleeping
1165 *
1166 * Use the method defined in the ATA specification to
1167 * make either device 0, or device 1, active on the
1168 * ATA channel.
1169 *
1170 * This is a high-level version of ata_std_dev_select(),
1171 * which additionally provides the services of inserting
1172 * the proper pauses and status polling, where needed.
1173 *
1174 * LOCKING:
1175 * caller.
1176 */
1177
1178void ata_dev_select(struct ata_port *ap, unsigned int device,
1179 unsigned int wait, unsigned int can_sleep)
1180{
88574551 1181 if (ata_msg_probe(ap))
44877b4e
TH
1182 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1183 "device %u, wait %u\n", device, wait);
1da177e4
LT
1184
1185 if (wait)
1186 ata_wait_idle(ap);
1187
1188 ap->ops->dev_select(ap, device);
1189
1190 if (wait) {
9af5c9c9 1191 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1192 msleep(150);
1193 ata_wait_idle(ap);
1194 }
1195}
1196
1197/**
1198 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1199 * @id: IDENTIFY DEVICE page to dump
1da177e4 1200 *
0bd3300a
TH
1201 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1202 * page.
1da177e4
LT
1203 *
1204 * LOCKING:
1205 * caller.
1206 */
1207
0bd3300a 1208static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1209{
1210 DPRINTK("49==0x%04x "
1211 "53==0x%04x "
1212 "63==0x%04x "
1213 "64==0x%04x "
1214 "75==0x%04x \n",
0bd3300a
TH
1215 id[49],
1216 id[53],
1217 id[63],
1218 id[64],
1219 id[75]);
1da177e4
LT
1220 DPRINTK("80==0x%04x "
1221 "81==0x%04x "
1222 "82==0x%04x "
1223 "83==0x%04x "
1224 "84==0x%04x \n",
0bd3300a
TH
1225 id[80],
1226 id[81],
1227 id[82],
1228 id[83],
1229 id[84]);
1da177e4
LT
1230 DPRINTK("88==0x%04x "
1231 "93==0x%04x\n",
0bd3300a
TH
1232 id[88],
1233 id[93]);
1da177e4
LT
1234}
1235
cb95d562
TH
1236/**
1237 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1238 * @id: IDENTIFY data to compute xfer mask from
1239 *
1240 * Compute the xfermask for this device. This is not as trivial
1241 * as it seems if we must consider early devices correctly.
1242 *
1243 * FIXME: pre IDE drive timing (do we care ?).
1244 *
1245 * LOCKING:
1246 * None.
1247 *
1248 * RETURNS:
1249 * Computed xfermask
1250 */
1251static unsigned int ata_id_xfermask(const u16 *id)
1252{
1253 unsigned int pio_mask, mwdma_mask, udma_mask;
1254
1255 /* Usual case. Word 53 indicates word 64 is valid */
1256 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1257 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1258 pio_mask <<= 3;
1259 pio_mask |= 0x7;
1260 } else {
1261 /* If word 64 isn't valid then Word 51 high byte holds
1262 * the PIO timing number for the maximum. Turn it into
1263 * a mask.
1264 */
7a0f1c8a 1265 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
1266 if (mode < 5) /* Valid PIO range */
1267 pio_mask = (2 << mode) - 1;
1268 else
1269 pio_mask = 1;
cb95d562
TH
1270
1271 /* But wait.. there's more. Design your standards by
1272 * committee and you too can get a free iordy field to
1273 * process. However its the speeds not the modes that
1274 * are supported... Note drivers using the timing API
1275 * will get this right anyway
1276 */
1277 }
1278
1279 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1280
b352e57d
AC
1281 if (ata_id_is_cfa(id)) {
1282 /*
1283 * Process compact flash extended modes
1284 */
1285 int pio = id[163] & 0x7;
1286 int dma = (id[163] >> 3) & 7;
1287
1288 if (pio)
1289 pio_mask |= (1 << 5);
1290 if (pio > 1)
1291 pio_mask |= (1 << 6);
1292 if (dma)
1293 mwdma_mask |= (1 << 3);
1294 if (dma > 1)
1295 mwdma_mask |= (1 << 4);
1296 }
1297
fb21f0d0
TH
1298 udma_mask = 0;
1299 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1300 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1301
1302 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1303}
1304
86e45b6b
TH
1305/**
1306 * ata_port_queue_task - Queue port_task
1307 * @ap: The ata_port to queue port_task for
e2a7f77a 1308 * @fn: workqueue function to be scheduled
65f27f38 1309 * @data: data for @fn to use
e2a7f77a 1310 * @delay: delay time for workqueue function
86e45b6b
TH
1311 *
1312 * Schedule @fn(@data) for execution after @delay jiffies using
1313 * port_task. There is one port_task per port and it's the
1314 * user(low level driver)'s responsibility to make sure that only
1315 * one task is active at any given time.
1316 *
1317 * libata core layer takes care of synchronization between
1318 * port_task and EH. ata_port_queue_task() may be ignored for EH
1319 * synchronization.
1320 *
1321 * LOCKING:
1322 * Inherited from caller.
1323 */
65f27f38 1324void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1325 unsigned long delay)
1326{
65f27f38
DH
1327 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1328 ap->port_task_data = data;
86e45b6b 1329
45a66c1c
ON
1330 /* may fail if ata_port_flush_task() in progress */
1331 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1332}
1333
1334/**
1335 * ata_port_flush_task - Flush port_task
1336 * @ap: The ata_port to flush port_task for
1337 *
1338 * After this function completes, port_task is guranteed not to
1339 * be running or scheduled.
1340 *
1341 * LOCKING:
1342 * Kernel thread context (may sleep)
1343 */
1344void ata_port_flush_task(struct ata_port *ap)
1345{
86e45b6b
TH
1346 DPRINTK("ENTER\n");
1347
45a66c1c 1348 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1349
0dd4b21f
BP
1350 if (ata_msg_ctl(ap))
1351 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1352}
1353
7102d230 1354static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1355{
77853bf2 1356 struct completion *waiting = qc->private_data;
a2a7a662 1357
a2a7a662 1358 complete(waiting);
a2a7a662
TH
1359}
1360
1361/**
2432697b 1362 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1363 * @dev: Device to which the command is sent
1364 * @tf: Taskfile registers for the command and the result
d69cf37d 1365 * @cdb: CDB for packet command
a2a7a662 1366 * @dma_dir: Data tranfer direction of the command
2432697b
TH
1367 * @sg: sg list for the data buffer of the command
1368 * @n_elem: Number of sg entries
a2a7a662
TH
1369 *
1370 * Executes libata internal command with timeout. @tf contains
1371 * command on entry and result on return. Timeout and error
1372 * conditions are reported via return value. No recovery action
1373 * is taken after a command times out. It's caller's duty to
1374 * clean up after timeout.
1375 *
1376 * LOCKING:
1377 * None. Should be called with kernel context, might sleep.
551e8889
TH
1378 *
1379 * RETURNS:
1380 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1381 */
2432697b
TH
1382unsigned ata_exec_internal_sg(struct ata_device *dev,
1383 struct ata_taskfile *tf, const u8 *cdb,
1384 int dma_dir, struct scatterlist *sg,
1385 unsigned int n_elem)
a2a7a662 1386{
9af5c9c9
TH
1387 struct ata_link *link = dev->link;
1388 struct ata_port *ap = link->ap;
a2a7a662
TH
1389 u8 command = tf->command;
1390 struct ata_queued_cmd *qc;
2ab7db1f 1391 unsigned int tag, preempted_tag;
dedaf2b0 1392 u32 preempted_sactive, preempted_qc_active;
60be6b9a 1393 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1394 unsigned long flags;
77853bf2 1395 unsigned int err_mask;
d95a717f 1396 int rc;
a2a7a662 1397
ba6a1308 1398 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1399
e3180499 1400 /* no internal command while frozen */
b51e9e5d 1401 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1402 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1403 return AC_ERR_SYSTEM;
1404 }
1405
2ab7db1f 1406 /* initialize internal qc */
a2a7a662 1407
2ab7db1f
TH
1408 /* XXX: Tag 0 is used for drivers with legacy EH as some
1409 * drivers choke if any other tag is given. This breaks
1410 * ata_tag_internal() test for those drivers. Don't use new
1411 * EH stuff without converting to it.
1412 */
1413 if (ap->ops->error_handler)
1414 tag = ATA_TAG_INTERNAL;
1415 else
1416 tag = 0;
1417
6cec4a39 1418 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1419 BUG();
f69499f4 1420 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1421
1422 qc->tag = tag;
1423 qc->scsicmd = NULL;
1424 qc->ap = ap;
1425 qc->dev = dev;
1426 ata_qc_reinit(qc);
1427
9af5c9c9
TH
1428 preempted_tag = link->active_tag;
1429 preempted_sactive = link->sactive;
dedaf2b0 1430 preempted_qc_active = ap->qc_active;
9af5c9c9
TH
1431 link->active_tag = ATA_TAG_POISON;
1432 link->sactive = 0;
dedaf2b0 1433 ap->qc_active = 0;
2ab7db1f
TH
1434
1435 /* prepare & issue qc */
a2a7a662 1436 qc->tf = *tf;
d69cf37d
TH
1437 if (cdb)
1438 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1439 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1440 qc->dma_dir = dma_dir;
1441 if (dma_dir != DMA_NONE) {
2432697b
TH
1442 unsigned int i, buflen = 0;
1443
1444 for (i = 0; i < n_elem; i++)
1445 buflen += sg[i].length;
1446
1447 ata_sg_init(qc, sg, n_elem);
49c80429 1448 qc->nbytes = buflen;
a2a7a662
TH
1449 }
1450
77853bf2 1451 qc->private_data = &wait;
a2a7a662
TH
1452 qc->complete_fn = ata_qc_complete_internal;
1453
8e0e694a 1454 ata_qc_issue(qc);
a2a7a662 1455
ba6a1308 1456 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1457
a8601e5f 1458 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
d95a717f
TH
1459
1460 ata_port_flush_task(ap);
41ade50c 1461
d95a717f 1462 if (!rc) {
ba6a1308 1463 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1464
1465 /* We're racing with irq here. If we lose, the
1466 * following test prevents us from completing the qc
d95a717f
TH
1467 * twice. If we win, the port is frozen and will be
1468 * cleaned up by ->post_internal_cmd().
a2a7a662 1469 */
77853bf2 1470 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1471 qc->err_mask |= AC_ERR_TIMEOUT;
1472
1473 if (ap->ops->error_handler)
1474 ata_port_freeze(ap);
1475 else
1476 ata_qc_complete(qc);
f15a1daf 1477
0dd4b21f
BP
1478 if (ata_msg_warn(ap))
1479 ata_dev_printk(dev, KERN_WARNING,
88574551 1480 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1481 }
1482
ba6a1308 1483 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1484 }
1485
d95a717f
TH
1486 /* do post_internal_cmd */
1487 if (ap->ops->post_internal_cmd)
1488 ap->ops->post_internal_cmd(qc);
1489
a51d644a
TH
1490 /* perform minimal error analysis */
1491 if (qc->flags & ATA_QCFLAG_FAILED) {
1492 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1493 qc->err_mask |= AC_ERR_DEV;
1494
1495 if (!qc->err_mask)
1496 qc->err_mask |= AC_ERR_OTHER;
1497
1498 if (qc->err_mask & ~AC_ERR_OTHER)
1499 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1500 }
1501
15869303 1502 /* finish up */
ba6a1308 1503 spin_lock_irqsave(ap->lock, flags);
15869303 1504
e61e0672 1505 *tf = qc->result_tf;
77853bf2
TH
1506 err_mask = qc->err_mask;
1507
1508 ata_qc_free(qc);
9af5c9c9
TH
1509 link->active_tag = preempted_tag;
1510 link->sactive = preempted_sactive;
dedaf2b0 1511 ap->qc_active = preempted_qc_active;
77853bf2 1512
1f7dd3e9
TH
1513 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1514 * Until those drivers are fixed, we detect the condition
1515 * here, fail the command with AC_ERR_SYSTEM and reenable the
1516 * port.
1517 *
1518 * Note that this doesn't change any behavior as internal
1519 * command failure results in disabling the device in the
1520 * higher layer for LLDDs without new reset/EH callbacks.
1521 *
1522 * Kill the following code as soon as those drivers are fixed.
1523 */
198e0fed 1524 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1525 err_mask |= AC_ERR_SYSTEM;
1526 ata_port_probe(ap);
1527 }
1528
ba6a1308 1529 spin_unlock_irqrestore(ap->lock, flags);
15869303 1530
77853bf2 1531 return err_mask;
a2a7a662
TH
1532}
1533
2432697b 1534/**
33480a0e 1535 * ata_exec_internal - execute libata internal command
2432697b
TH
1536 * @dev: Device to which the command is sent
1537 * @tf: Taskfile registers for the command and the result
1538 * @cdb: CDB for packet command
1539 * @dma_dir: Data tranfer direction of the command
1540 * @buf: Data buffer of the command
1541 * @buflen: Length of data buffer
1542 *
1543 * Wrapper around ata_exec_internal_sg() which takes simple
1544 * buffer instead of sg list.
1545 *
1546 * LOCKING:
1547 * None. Should be called with kernel context, might sleep.
1548 *
1549 * RETURNS:
1550 * Zero on success, AC_ERR_* mask on failure
1551 */
1552unsigned ata_exec_internal(struct ata_device *dev,
1553 struct ata_taskfile *tf, const u8 *cdb,
1554 int dma_dir, void *buf, unsigned int buflen)
1555{
33480a0e
TH
1556 struct scatterlist *psg = NULL, sg;
1557 unsigned int n_elem = 0;
2432697b 1558
33480a0e
TH
1559 if (dma_dir != DMA_NONE) {
1560 WARN_ON(!buf);
1561 sg_init_one(&sg, buf, buflen);
1562 psg = &sg;
1563 n_elem++;
1564 }
2432697b 1565
33480a0e 1566 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1567}
1568
977e6b9f
TH
1569/**
1570 * ata_do_simple_cmd - execute simple internal command
1571 * @dev: Device to which the command is sent
1572 * @cmd: Opcode to execute
1573 *
1574 * Execute a 'simple' command, that only consists of the opcode
1575 * 'cmd' itself, without filling any other registers
1576 *
1577 * LOCKING:
1578 * Kernel thread context (may sleep).
1579 *
1580 * RETURNS:
1581 * Zero on success, AC_ERR_* mask on failure
e58eb583 1582 */
77b08fb5 1583unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1584{
1585 struct ata_taskfile tf;
e58eb583
TH
1586
1587 ata_tf_init(dev, &tf);
1588
1589 tf.command = cmd;
1590 tf.flags |= ATA_TFLAG_DEVICE;
1591 tf.protocol = ATA_PROT_NODATA;
1592
977e6b9f 1593 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1594}
1595
1bc4ccff
AC
1596/**
1597 * ata_pio_need_iordy - check if iordy needed
1598 * @adev: ATA device
1599 *
1600 * Check if the current speed of the device requires IORDY. Used
1601 * by various controllers for chip configuration.
1602 */
a617c09f 1603
1bc4ccff
AC
1604unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1605{
432729f0
AC
1606 /* Controller doesn't support IORDY. Probably a pointless check
1607 as the caller should know this */
9af5c9c9 1608 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1609 return 0;
432729f0
AC
1610 /* PIO3 and higher it is mandatory */
1611 if (adev->pio_mode > XFER_PIO_2)
1612 return 1;
1613 /* We turn it on when possible */
1614 if (ata_id_has_iordy(adev->id))
1bc4ccff 1615 return 1;
432729f0
AC
1616 return 0;
1617}
2e9edbf8 1618
432729f0
AC
1619/**
1620 * ata_pio_mask_no_iordy - Return the non IORDY mask
1621 * @adev: ATA device
1622 *
1623 * Compute the highest mode possible if we are not using iordy. Return
1624 * -1 if no iordy mode is available.
1625 */
a617c09f 1626
432729f0
AC
1627static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1628{
1bc4ccff 1629 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1630 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1631 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1632 /* Is the speed faster than the drive allows non IORDY ? */
1633 if (pio) {
1634 /* This is cycle times not frequency - watch the logic! */
1635 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1636 return 3 << ATA_SHIFT_PIO;
1637 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1638 }
1639 }
432729f0 1640 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1641}
1642
1da177e4 1643/**
49016aca 1644 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1645 * @dev: target device
1646 * @p_class: pointer to class of the target device (may be changed)
bff04647 1647 * @flags: ATA_READID_* flags
fe635c7e 1648 * @id: buffer to read IDENTIFY data into
1da177e4 1649 *
49016aca
TH
1650 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1651 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1652 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1653 * for pre-ATA4 drives.
1da177e4 1654 *
50a99018
AC
1655 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1656 * now we abort if we hit that case.
1657 *
1da177e4 1658 * LOCKING:
49016aca
TH
1659 * Kernel thread context (may sleep)
1660 *
1661 * RETURNS:
1662 * 0 on success, -errno otherwise.
1da177e4 1663 */
a9beec95 1664int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1665 unsigned int flags, u16 *id)
1da177e4 1666{
9af5c9c9 1667 struct ata_port *ap = dev->link->ap;
49016aca 1668 unsigned int class = *p_class;
a0123703 1669 struct ata_taskfile tf;
49016aca
TH
1670 unsigned int err_mask = 0;
1671 const char *reason;
54936f8b 1672 int may_fallback = 1, tried_spinup = 0;
49016aca 1673 int rc;
1da177e4 1674
0dd4b21f 1675 if (ata_msg_ctl(ap))
44877b4e 1676 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1677
49016aca 1678 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 1679 retry:
3373efd8 1680 ata_tf_init(dev, &tf);
a0123703 1681
49016aca
TH
1682 switch (class) {
1683 case ATA_DEV_ATA:
a0123703 1684 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1685 break;
1686 case ATA_DEV_ATAPI:
a0123703 1687 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1688 break;
1689 default:
1690 rc = -ENODEV;
1691 reason = "unsupported class";
1692 goto err_out;
1da177e4
LT
1693 }
1694
a0123703 1695 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1696
1697 /* Some devices choke if TF registers contain garbage. Make
1698 * sure those are properly initialized.
1699 */
1700 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1701
1702 /* Device presence detection is unreliable on some
1703 * controllers. Always poll IDENTIFY if available.
1704 */
1705 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1706
3373efd8 1707 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
49016aca 1708 id, sizeof(id[0]) * ATA_ID_WORDS);
a0123703 1709 if (err_mask) {
800b3996 1710 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 1711 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 1712 ap->print_id, dev->devno);
55a8e2c8
TH
1713 return -ENOENT;
1714 }
1715
54936f8b
TH
1716 /* Device or controller might have reported the wrong
1717 * device class. Give a shot at the other IDENTIFY if
1718 * the current one is aborted by the device.
1719 */
1720 if (may_fallback &&
1721 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1722 may_fallback = 0;
1723
1724 if (class == ATA_DEV_ATA)
1725 class = ATA_DEV_ATAPI;
1726 else
1727 class = ATA_DEV_ATA;
1728 goto retry;
1729 }
1730
49016aca
TH
1731 rc = -EIO;
1732 reason = "I/O error";
1da177e4
LT
1733 goto err_out;
1734 }
1735
54936f8b
TH
1736 /* Falling back doesn't make sense if ID data was read
1737 * successfully at least once.
1738 */
1739 may_fallback = 0;
1740
49016aca 1741 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1742
49016aca 1743 /* sanity check */
a4f5749b 1744 rc = -EINVAL;
6070068b 1745 reason = "device reports invalid type";
a4f5749b
TH
1746
1747 if (class == ATA_DEV_ATA) {
1748 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1749 goto err_out;
1750 } else {
1751 if (ata_id_is_ata(id))
1752 goto err_out;
49016aca
TH
1753 }
1754
169439c2
ML
1755 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1756 tried_spinup = 1;
1757 /*
1758 * Drive powered-up in standby mode, and requires a specific
1759 * SET_FEATURES spin-up subcommand before it will accept
1760 * anything other than the original IDENTIFY command.
1761 */
1762 ata_tf_init(dev, &tf);
1763 tf.command = ATA_CMD_SET_FEATURES;
1764 tf.feature = SETFEATURES_SPINUP;
1765 tf.protocol = ATA_PROT_NODATA;
1766 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1767 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
fb0582f9 1768 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1769 rc = -EIO;
1770 reason = "SPINUP failed";
1771 goto err_out;
1772 }
1773 /*
1774 * If the drive initially returned incomplete IDENTIFY info,
1775 * we now must reissue the IDENTIFY command.
1776 */
1777 if (id[2] == 0x37c8)
1778 goto retry;
1779 }
1780
bff04647 1781 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1782 /*
1783 * The exact sequence expected by certain pre-ATA4 drives is:
1784 * SRST RESET
50a99018
AC
1785 * IDENTIFY (optional in early ATA)
1786 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
1787 * anything else..
1788 * Some drives were very specific about that exact sequence.
50a99018
AC
1789 *
1790 * Note that ATA4 says lba is mandatory so the second check
1791 * shoud never trigger.
49016aca
TH
1792 */
1793 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 1794 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
1795 if (err_mask) {
1796 rc = -EIO;
1797 reason = "INIT_DEV_PARAMS failed";
1798 goto err_out;
1799 }
1800
1801 /* current CHS translation info (id[53-58]) might be
1802 * changed. reread the identify device info.
1803 */
bff04647 1804 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
1805 goto retry;
1806 }
1807 }
1808
1809 *p_class = class;
fe635c7e 1810
49016aca
TH
1811 return 0;
1812
1813 err_out:
88574551 1814 if (ata_msg_warn(ap))
0dd4b21f 1815 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 1816 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
1817 return rc;
1818}
1819
3373efd8 1820static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1821{
9af5c9c9
TH
1822 struct ata_port *ap = dev->link->ap;
1823 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1824}
1825
a6e6ce8e
TH
1826static void ata_dev_config_ncq(struct ata_device *dev,
1827 char *desc, size_t desc_sz)
1828{
9af5c9c9 1829 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1830 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1831
1832 if (!ata_id_has_ncq(dev->id)) {
1833 desc[0] = '\0';
1834 return;
1835 }
75683fe7 1836 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1837 snprintf(desc, desc_sz, "NCQ (not used)");
1838 return;
1839 }
a6e6ce8e 1840 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1841 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1842 dev->flags |= ATA_DFLAG_NCQ;
1843 }
1844
1845 if (hdepth >= ddepth)
1846 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1847 else
1848 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1849}
1850
49016aca 1851/**
ffeae418 1852 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1853 * @dev: Target device to configure
1854 *
1855 * Configure @dev according to @dev->id. Generic and low-level
1856 * driver specific fixups are also applied.
49016aca
TH
1857 *
1858 * LOCKING:
ffeae418
TH
1859 * Kernel thread context (may sleep)
1860 *
1861 * RETURNS:
1862 * 0 on success, -errno otherwise
49016aca 1863 */
efdaedc4 1864int ata_dev_configure(struct ata_device *dev)
49016aca 1865{
9af5c9c9
TH
1866 struct ata_port *ap = dev->link->ap;
1867 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 1868 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1869 const u16 *id = dev->id;
ff8854b2 1870 unsigned int xfer_mask;
b352e57d 1871 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1872 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1873 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1874 int rc;
49016aca 1875
0dd4b21f 1876 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1877 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1878 __FUNCTION__);
ffeae418 1879 return 0;
49016aca
TH
1880 }
1881
0dd4b21f 1882 if (ata_msg_probe(ap))
44877b4e 1883 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1884
75683fe7
TH
1885 /* set horkage */
1886 dev->horkage |= ata_dev_blacklisted(dev);
1887
6746544c
TH
1888 /* let ACPI work its magic */
1889 rc = ata_acpi_on_devcfg(dev);
1890 if (rc)
1891 return rc;
08573a86 1892
05027adc
TH
1893 /* massage HPA, do it early as it might change IDENTIFY data */
1894 rc = ata_hpa_resize(dev);
1895 if (rc)
1896 return rc;
1897
c39f5ebe 1898 /* print device capabilities */
0dd4b21f 1899 if (ata_msg_probe(ap))
88574551
TH
1900 ata_dev_printk(dev, KERN_DEBUG,
1901 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1902 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1903 __FUNCTION__,
f15a1daf
TH
1904 id[49], id[82], id[83], id[84],
1905 id[85], id[86], id[87], id[88]);
c39f5ebe 1906
208a9933 1907 /* initialize to-be-configured parameters */
ea1dd4e1 1908 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1909 dev->max_sectors = 0;
1910 dev->cdb_len = 0;
1911 dev->n_sectors = 0;
1912 dev->cylinders = 0;
1913 dev->heads = 0;
1914 dev->sectors = 0;
1915
1da177e4
LT
1916 /*
1917 * common ATA, ATAPI feature tests
1918 */
1919
ff8854b2 1920 /* find max transfer mode; for printk only */
1148c3a7 1921 xfer_mask = ata_id_xfermask(id);
1da177e4 1922
0dd4b21f
BP
1923 if (ata_msg_probe(ap))
1924 ata_dump_id(id);
1da177e4 1925
ef143d57
AL
1926 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1927 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1928 sizeof(fwrevbuf));
1929
1930 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1931 sizeof(modelbuf));
1932
1da177e4
LT
1933 /* ATA-specific feature tests */
1934 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1935 if (ata_id_is_cfa(id)) {
1936 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1937 ata_dev_printk(dev, KERN_WARNING,
1938 "supports DRM functions and may "
1939 "not be fully accessable.\n");
b352e57d
AC
1940 snprintf(revbuf, 7, "CFA");
1941 }
1942 else
1943 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1944
1148c3a7 1945 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1946
3f64f565
EM
1947 if (dev->id[59] & 0x100)
1948 dev->multi_count = dev->id[59] & 0xff;
1949
1148c3a7 1950 if (ata_id_has_lba(id)) {
4c2d721a 1951 const char *lba_desc;
a6e6ce8e 1952 char ncq_desc[20];
8bf62ece 1953
4c2d721a
TH
1954 lba_desc = "LBA";
1955 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1956 if (ata_id_has_lba48(id)) {
8bf62ece 1957 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1958 lba_desc = "LBA48";
6fc49adb
TH
1959
1960 if (dev->n_sectors >= (1UL << 28) &&
1961 ata_id_has_flush_ext(id))
1962 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1963 }
8bf62ece 1964
a6e6ce8e
TH
1965 /* config NCQ */
1966 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1967
8bf62ece 1968 /* print device info to dmesg */
3f64f565
EM
1969 if (ata_msg_drv(ap) && print_info) {
1970 ata_dev_printk(dev, KERN_INFO,
1971 "%s: %s, %s, max %s\n",
1972 revbuf, modelbuf, fwrevbuf,
1973 ata_mode_string(xfer_mask));
1974 ata_dev_printk(dev, KERN_INFO,
1975 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 1976 (unsigned long long)dev->n_sectors,
3f64f565
EM
1977 dev->multi_count, lba_desc, ncq_desc);
1978 }
ffeae418 1979 } else {
8bf62ece
AL
1980 /* CHS */
1981
1982 /* Default translation */
1148c3a7
TH
1983 dev->cylinders = id[1];
1984 dev->heads = id[3];
1985 dev->sectors = id[6];
8bf62ece 1986
1148c3a7 1987 if (ata_id_current_chs_valid(id)) {
8bf62ece 1988 /* Current CHS translation is valid. */
1148c3a7
TH
1989 dev->cylinders = id[54];
1990 dev->heads = id[55];
1991 dev->sectors = id[56];
8bf62ece
AL
1992 }
1993
1994 /* print device info to dmesg */
3f64f565 1995 if (ata_msg_drv(ap) && print_info) {
88574551 1996 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
1997 "%s: %s, %s, max %s\n",
1998 revbuf, modelbuf, fwrevbuf,
1999 ata_mode_string(xfer_mask));
a84471fe 2000 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2001 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2002 (unsigned long long)dev->n_sectors,
2003 dev->multi_count, dev->cylinders,
2004 dev->heads, dev->sectors);
2005 }
07f6f7d0
AL
2006 }
2007
6e7846e9 2008 dev->cdb_len = 16;
1da177e4
LT
2009 }
2010
2011 /* ATAPI-specific feature tests */
2c13b7ce 2012 else if (dev->class == ATA_DEV_ATAPI) {
08a556db
AL
2013 char *cdb_intr_string = "";
2014
1148c3a7 2015 rc = atapi_cdb_len(id);
1da177e4 2016 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2017 if (ata_msg_warn(ap))
88574551
TH
2018 ata_dev_printk(dev, KERN_WARNING,
2019 "unsupported CDB len\n");
ffeae418 2020 rc = -EINVAL;
1da177e4
LT
2021 goto err_out_nosup;
2022 }
6e7846e9 2023 dev->cdb_len = (unsigned int) rc;
1da177e4 2024
9f45cbd3
KCA
2025 /*
2026 * check to see if this ATAPI device supports
2027 * Asynchronous Notification
2028 */
2029 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_AN(id)) {
2030 int err;
2031 /* issue SET feature command to turn this on */
2032 err = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2033 if (err)
2034 ata_dev_printk(dev, KERN_ERR,
2035 "unable to set AN, err %x\n",
2036 err);
2037 else
2038 dev->flags |= ATA_DFLAG_AN;
2039 }
2040
08a556db 2041 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2042 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2043 cdb_intr_string = ", CDB intr";
2044 }
312f7da2 2045
1da177e4 2046 /* print device info to dmesg */
5afc8142 2047 if (ata_msg_drv(ap) && print_info)
ef143d57
AL
2048 ata_dev_printk(dev, KERN_INFO,
2049 "ATAPI: %s, %s, max %s%s\n",
2050 modelbuf, fwrevbuf,
12436c30
TH
2051 ata_mode_string(xfer_mask),
2052 cdb_intr_string);
1da177e4
LT
2053 }
2054
914ed354
TH
2055 /* determine max_sectors */
2056 dev->max_sectors = ATA_MAX_SECTORS;
2057 if (dev->flags & ATA_DFLAG_LBA48)
2058 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2059
93590859
AC
2060 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2061 /* Let the user know. We don't want to disallow opens for
2062 rescue purposes, or in case the vendor is just a blithering
2063 idiot */
2064 if (print_info) {
2065 ata_dev_printk(dev, KERN_WARNING,
2066"Drive reports diagnostics failure. This may indicate a drive\n");
2067 ata_dev_printk(dev, KERN_WARNING,
2068"fault or invalid emulation. Contact drive vendor for information.\n");
2069 }
2070 }
2071
4b2f3ede 2072 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2073 if (ata_dev_knobble(dev)) {
5afc8142 2074 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2075 ata_dev_printk(dev, KERN_INFO,
2076 "applying bridge limits\n");
5a529139 2077 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2078 dev->max_sectors = ATA_MAX_SECTORS;
2079 }
2080
75683fe7 2081 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2082 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2083 dev->max_sectors);
18d6e9d5 2084
4b2f3ede 2085 if (ap->ops->dev_config)
cd0d3bbc 2086 ap->ops->dev_config(dev);
4b2f3ede 2087
0dd4b21f
BP
2088 if (ata_msg_probe(ap))
2089 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2090 __FUNCTION__, ata_chk_status(ap));
ffeae418 2091 return 0;
1da177e4
LT
2092
2093err_out_nosup:
0dd4b21f 2094 if (ata_msg_probe(ap))
88574551
TH
2095 ata_dev_printk(dev, KERN_DEBUG,
2096 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2097 return rc;
1da177e4
LT
2098}
2099
be0d18df 2100/**
2e41e8e6 2101 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2102 * @ap: port
2103 *
2e41e8e6 2104 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2105 * detection.
2106 */
2107
2108int ata_cable_40wire(struct ata_port *ap)
2109{
2110 return ATA_CBL_PATA40;
2111}
2112
2113/**
2e41e8e6 2114 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2115 * @ap: port
2116 *
2e41e8e6 2117 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2118 * detection.
2119 */
2120
2121int ata_cable_80wire(struct ata_port *ap)
2122{
2123 return ATA_CBL_PATA80;
2124}
2125
2126/**
2127 * ata_cable_unknown - return unknown PATA cable.
2128 * @ap: port
2129 *
2130 * Helper method for drivers which have no PATA cable detection.
2131 */
2132
2133int ata_cable_unknown(struct ata_port *ap)
2134{
2135 return ATA_CBL_PATA_UNK;
2136}
2137
2138/**
2139 * ata_cable_sata - return SATA cable type
2140 * @ap: port
2141 *
2142 * Helper method for drivers which have SATA cables
2143 */
2144
2145int ata_cable_sata(struct ata_port *ap)
2146{
2147 return ATA_CBL_SATA;
2148}
2149
1da177e4
LT
2150/**
2151 * ata_bus_probe - Reset and probe ATA bus
2152 * @ap: Bus to probe
2153 *
0cba632b
JG
2154 * Master ATA bus probing function. Initiates a hardware-dependent
2155 * bus reset, then attempts to identify any devices found on
2156 * the bus.
2157 *
1da177e4 2158 * LOCKING:
0cba632b 2159 * PCI/etc. bus probe sem.
1da177e4
LT
2160 *
2161 * RETURNS:
96072e69 2162 * Zero on success, negative errno otherwise.
1da177e4
LT
2163 */
2164
80289167 2165int ata_bus_probe(struct ata_port *ap)
1da177e4 2166{
28ca5c57 2167 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2168 int tries[ATA_MAX_DEVICES];
f58229f8 2169 int rc;
e82cbdb9 2170 struct ata_device *dev;
1da177e4 2171
28ca5c57 2172 ata_port_probe(ap);
c19ba8af 2173
f58229f8
TH
2174 ata_link_for_each_dev(dev, &ap->link)
2175 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2176
2177 retry:
2044470c 2178 /* reset and determine device classes */
52783c5d 2179 ap->ops->phy_reset(ap);
2061a47a 2180
f58229f8 2181 ata_link_for_each_dev(dev, &ap->link) {
52783c5d
TH
2182 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2183 dev->class != ATA_DEV_UNKNOWN)
2184 classes[dev->devno] = dev->class;
2185 else
2186 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2187
52783c5d 2188 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2189 }
1da177e4 2190
52783c5d 2191 ata_port_probe(ap);
2044470c 2192
b6079ca4
AC
2193 /* after the reset the device state is PIO 0 and the controller
2194 state is undefined. Record the mode */
2195
f58229f8
TH
2196 ata_link_for_each_dev(dev, &ap->link)
2197 dev->pio_mode = XFER_PIO_0;
b6079ca4 2198
f31f0cc2
JG
2199 /* read IDENTIFY page and configure devices. We have to do the identify
2200 specific sequence bass-ackwards so that PDIAG- is released by
2201 the slave device */
2202
f58229f8
TH
2203 ata_link_for_each_dev(dev, &ap->link) {
2204 if (tries[dev->devno])
2205 dev->class = classes[dev->devno];
ffeae418 2206
14d2bac1 2207 if (!ata_dev_enabled(dev))
ffeae418 2208 continue;
ffeae418 2209
bff04647
TH
2210 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2211 dev->id);
14d2bac1
TH
2212 if (rc)
2213 goto fail;
f31f0cc2
JG
2214 }
2215
be0d18df
AC
2216 /* Now ask for the cable type as PDIAG- should have been released */
2217 if (ap->ops->cable_detect)
2218 ap->cbl = ap->ops->cable_detect(ap);
2219
614fe29b
AC
2220 /* We may have SATA bridge glue hiding here irrespective of the
2221 reported cable types and sensed types */
2222 ata_link_for_each_dev(dev, &ap->link) {
2223 if (!ata_dev_enabled(dev))
2224 continue;
2225 /* SATA drives indicate we have a bridge. We don't know which
2226 end of the link the bridge is which is a problem */
2227 if (ata_id_is_sata(dev->id))
2228 ap->cbl = ATA_CBL_SATA;
2229 }
2230
f31f0cc2
JG
2231 /* After the identify sequence we can now set up the devices. We do
2232 this in the normal order so that the user doesn't get confused */
2233
f58229f8 2234 ata_link_for_each_dev(dev, &ap->link) {
f31f0cc2
JG
2235 if (!ata_dev_enabled(dev))
2236 continue;
14d2bac1 2237
9af5c9c9 2238 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2239 rc = ata_dev_configure(dev);
9af5c9c9 2240 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2241 if (rc)
2242 goto fail;
1da177e4
LT
2243 }
2244
e82cbdb9 2245 /* configure transfer mode */
0260731f 2246 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2247 if (rc)
51713d35 2248 goto fail;
1da177e4 2249
f58229f8
TH
2250 ata_link_for_each_dev(dev, &ap->link)
2251 if (ata_dev_enabled(dev))
e82cbdb9 2252 return 0;
1da177e4 2253
e82cbdb9
TH
2254 /* no device present, disable port */
2255 ata_port_disable(ap);
96072e69 2256 return -ENODEV;
14d2bac1
TH
2257
2258 fail:
4ae72a1e
TH
2259 tries[dev->devno]--;
2260
14d2bac1
TH
2261 switch (rc) {
2262 case -EINVAL:
4ae72a1e 2263 /* eeek, something went very wrong, give up */
14d2bac1
TH
2264 tries[dev->devno] = 0;
2265 break;
4ae72a1e
TH
2266
2267 case -ENODEV:
2268 /* give it just one more chance */
2269 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2270 case -EIO:
4ae72a1e
TH
2271 if (tries[dev->devno] == 1) {
2272 /* This is the last chance, better to slow
2273 * down than lose it.
2274 */
936fd732 2275 sata_down_spd_limit(&ap->link);
4ae72a1e
TH
2276 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2277 }
14d2bac1
TH
2278 }
2279
4ae72a1e 2280 if (!tries[dev->devno])
3373efd8 2281 ata_dev_disable(dev);
ec573755 2282
14d2bac1 2283 goto retry;
1da177e4
LT
2284}
2285
2286/**
0cba632b
JG
2287 * ata_port_probe - Mark port as enabled
2288 * @ap: Port for which we indicate enablement
1da177e4 2289 *
0cba632b
JG
2290 * Modify @ap data structure such that the system
2291 * thinks that the entire port is enabled.
2292 *
cca3974e 2293 * LOCKING: host lock, or some other form of
0cba632b 2294 * serialization.
1da177e4
LT
2295 */
2296
2297void ata_port_probe(struct ata_port *ap)
2298{
198e0fed 2299 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2300}
2301
3be680b7
TH
2302/**
2303 * sata_print_link_status - Print SATA link status
936fd732 2304 * @link: SATA link to printk link status about
3be680b7
TH
2305 *
2306 * This function prints link speed and status of a SATA link.
2307 *
2308 * LOCKING:
2309 * None.
2310 */
936fd732 2311void sata_print_link_status(struct ata_link *link)
3be680b7 2312{
6d5f9732 2313 u32 sstatus, scontrol, tmp;
3be680b7 2314
936fd732 2315 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2316 return;
936fd732 2317 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2318
936fd732 2319 if (ata_link_online(link)) {
3be680b7 2320 tmp = (sstatus >> 4) & 0xf;
936fd732 2321 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2322 "SATA link up %s (SStatus %X SControl %X)\n",
2323 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2324 } else {
936fd732 2325 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2326 "SATA link down (SStatus %X SControl %X)\n",
2327 sstatus, scontrol);
3be680b7
TH
2328 }
2329}
2330
1da177e4 2331/**
780a87f7
JG
2332 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2333 * @ap: SATA port associated with target SATA PHY.
1da177e4 2334 *
780a87f7
JG
2335 * This function issues commands to standard SATA Sxxx
2336 * PHY registers, to wake up the phy (and device), and
2337 * clear any reset condition.
1da177e4
LT
2338 *
2339 * LOCKING:
0cba632b 2340 * PCI/etc. bus probe sem.
1da177e4
LT
2341 *
2342 */
2343void __sata_phy_reset(struct ata_port *ap)
2344{
936fd732 2345 struct ata_link *link = &ap->link;
1da177e4 2346 unsigned long timeout = jiffies + (HZ * 5);
936fd732 2347 u32 sstatus;
1da177e4
LT
2348
2349 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 2350 /* issue phy wake/reset */
936fd732 2351 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
62ba2841
TH
2352 /* Couldn't find anything in SATA I/II specs, but
2353 * AHCI-1.1 10.4.2 says at least 1 ms. */
2354 mdelay(1);
1da177e4 2355 }
81952c54 2356 /* phy wake/clear reset */
936fd732 2357 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
1da177e4
LT
2358
2359 /* wait for phy to become ready, if necessary */
2360 do {
2361 msleep(200);
936fd732 2362 sata_scr_read(link, SCR_STATUS, &sstatus);
1da177e4
LT
2363 if ((sstatus & 0xf) != 1)
2364 break;
2365 } while (time_before(jiffies, timeout));
2366
3be680b7 2367 /* print link status */
936fd732 2368 sata_print_link_status(link);
656563e3 2369
3be680b7 2370 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 2371 if (!ata_link_offline(link))
1da177e4 2372 ata_port_probe(ap);
3be680b7 2373 else
1da177e4 2374 ata_port_disable(ap);
1da177e4 2375
198e0fed 2376 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2377 return;
2378
2379 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2380 ata_port_disable(ap);
2381 return;
2382 }
2383
2384 ap->cbl = ATA_CBL_SATA;
2385}
2386
2387/**
780a87f7
JG
2388 * sata_phy_reset - Reset SATA bus.
2389 * @ap: SATA port associated with target SATA PHY.
1da177e4 2390 *
780a87f7
JG
2391 * This function resets the SATA bus, and then probes
2392 * the bus for devices.
1da177e4
LT
2393 *
2394 * LOCKING:
0cba632b 2395 * PCI/etc. bus probe sem.
1da177e4
LT
2396 *
2397 */
2398void sata_phy_reset(struct ata_port *ap)
2399{
2400 __sata_phy_reset(ap);
198e0fed 2401 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2402 return;
2403 ata_bus_reset(ap);
2404}
2405
ebdfca6e
AC
2406/**
2407 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2408 * @adev: device
2409 *
2410 * Obtain the other device on the same cable, or if none is
2411 * present NULL is returned
2412 */
2e9edbf8 2413
3373efd8 2414struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2415{
9af5c9c9
TH
2416 struct ata_link *link = adev->link;
2417 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2418 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2419 return NULL;
2420 return pair;
2421}
2422
1da177e4 2423/**
780a87f7
JG
2424 * ata_port_disable - Disable port.
2425 * @ap: Port to be disabled.
1da177e4 2426 *
780a87f7
JG
2427 * Modify @ap data structure such that the system
2428 * thinks that the entire port is disabled, and should
2429 * never attempt to probe or communicate with devices
2430 * on this port.
2431 *
cca3974e 2432 * LOCKING: host lock, or some other form of
780a87f7 2433 * serialization.
1da177e4
LT
2434 */
2435
2436void ata_port_disable(struct ata_port *ap)
2437{
9af5c9c9
TH
2438 ap->link.device[0].class = ATA_DEV_NONE;
2439 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2440 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2441}
2442
1c3fae4d 2443/**
3c567b7d 2444 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2445 * @link: Link to adjust SATA spd limit for
1c3fae4d 2446 *
936fd732 2447 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2448 * function only adjusts the limit. The change must be applied
3c567b7d 2449 * using sata_set_spd().
1c3fae4d
TH
2450 *
2451 * LOCKING:
2452 * Inherited from caller.
2453 *
2454 * RETURNS:
2455 * 0 on success, negative errno on failure
2456 */
936fd732 2457int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2458{
81952c54
TH
2459 u32 sstatus, spd, mask;
2460 int rc, highbit;
1c3fae4d 2461
936fd732 2462 if (!sata_scr_valid(link))
008a7896
TH
2463 return -EOPNOTSUPP;
2464
2465 /* If SCR can be read, use it to determine the current SPD.
936fd732 2466 * If not, use cached value in link->sata_spd.
008a7896 2467 */
936fd732 2468 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2469 if (rc == 0)
2470 spd = (sstatus >> 4) & 0xf;
2471 else
936fd732 2472 spd = link->sata_spd;
1c3fae4d 2473
936fd732 2474 mask = link->sata_spd_limit;
1c3fae4d
TH
2475 if (mask <= 1)
2476 return -EINVAL;
008a7896
TH
2477
2478 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2479 highbit = fls(mask) - 1;
2480 mask &= ~(1 << highbit);
2481
008a7896
TH
2482 /* Mask off all speeds higher than or equal to the current
2483 * one. Force 1.5Gbps if current SPD is not available.
2484 */
2485 if (spd > 1)
2486 mask &= (1 << (spd - 1)) - 1;
2487 else
2488 mask &= 1;
2489
2490 /* were we already at the bottom? */
1c3fae4d
TH
2491 if (!mask)
2492 return -EINVAL;
2493
936fd732 2494 link->sata_spd_limit = mask;
1c3fae4d 2495
936fd732 2496 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2497 sata_spd_string(fls(mask)));
1c3fae4d
TH
2498
2499 return 0;
2500}
2501
936fd732 2502static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2503{
2504 u32 spd, limit;
2505
936fd732 2506 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2507 limit = 0;
2508 else
936fd732 2509 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2510
2511 spd = (*scontrol >> 4) & 0xf;
2512 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2513
2514 return spd != limit;
2515}
2516
2517/**
3c567b7d 2518 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2519 * @link: Link in question
1c3fae4d
TH
2520 *
2521 * Test whether the spd limit in SControl matches
936fd732 2522 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2523 * whether hardreset is necessary to apply SATA spd
2524 * configuration.
2525 *
2526 * LOCKING:
2527 * Inherited from caller.
2528 *
2529 * RETURNS:
2530 * 1 if SATA spd configuration is needed, 0 otherwise.
2531 */
936fd732 2532int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2533{
2534 u32 scontrol;
2535
936fd732 2536 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2537 return 0;
2538
936fd732 2539 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2540}
2541
2542/**
3c567b7d 2543 * sata_set_spd - set SATA spd according to spd limit
936fd732 2544 * @link: Link to set SATA spd for
1c3fae4d 2545 *
936fd732 2546 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2547 *
2548 * LOCKING:
2549 * Inherited from caller.
2550 *
2551 * RETURNS:
2552 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2553 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2554 */
936fd732 2555int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2556{
2557 u32 scontrol;
81952c54 2558 int rc;
1c3fae4d 2559
936fd732 2560 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2561 return rc;
1c3fae4d 2562
936fd732 2563 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2564 return 0;
2565
936fd732 2566 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2567 return rc;
2568
1c3fae4d
TH
2569 return 1;
2570}
2571
452503f9
AC
2572/*
2573 * This mode timing computation functionality is ported over from
2574 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2575 */
2576/*
b352e57d 2577 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2578 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2579 * for UDMA6, which is currently supported only by Maxtor drives.
2580 *
2581 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2582 */
2583
2584static const struct ata_timing ata_timing[] = {
2585
2586 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2587 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2588 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2589 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2590
b352e57d
AC
2591 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2592 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2593 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2594 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2595 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2596
2597/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2598
452503f9
AC
2599 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2600 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2601 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2602
452503f9
AC
2603 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2604 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2605 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2606
b352e57d
AC
2607 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2608 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2609 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2610 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2611
2612 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2613 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2614 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2615
2616/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2617
2618 { 0xFF }
2619};
2620
2621#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2622#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2623
2624static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2625{
2626 q->setup = EZ(t->setup * 1000, T);
2627 q->act8b = EZ(t->act8b * 1000, T);
2628 q->rec8b = EZ(t->rec8b * 1000, T);
2629 q->cyc8b = EZ(t->cyc8b * 1000, T);
2630 q->active = EZ(t->active * 1000, T);
2631 q->recover = EZ(t->recover * 1000, T);
2632 q->cycle = EZ(t->cycle * 1000, T);
2633 q->udma = EZ(t->udma * 1000, UT);
2634}
2635
2636void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2637 struct ata_timing *m, unsigned int what)
2638{
2639 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2640 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2641 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2642 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2643 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2644 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2645 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2646 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2647}
2648
2649static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2650{
2651 const struct ata_timing *t;
2652
2653 for (t = ata_timing; t->mode != speed; t++)
91190758 2654 if (t->mode == 0xFF)
452503f9 2655 return NULL;
2e9edbf8 2656 return t;
452503f9
AC
2657}
2658
2659int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2660 struct ata_timing *t, int T, int UT)
2661{
2662 const struct ata_timing *s;
2663 struct ata_timing p;
2664
2665 /*
2e9edbf8 2666 * Find the mode.
75b1f2f8 2667 */
452503f9
AC
2668
2669 if (!(s = ata_timing_find_mode(speed)))
2670 return -EINVAL;
2671
75b1f2f8
AL
2672 memcpy(t, s, sizeof(*s));
2673
452503f9
AC
2674 /*
2675 * If the drive is an EIDE drive, it can tell us it needs extended
2676 * PIO/MW_DMA cycle timing.
2677 */
2678
2679 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2680 memset(&p, 0, sizeof(p));
2681 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2682 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2683 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2684 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2685 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2686 }
2687 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2688 }
2689
2690 /*
2691 * Convert the timing to bus clock counts.
2692 */
2693
75b1f2f8 2694 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2695
2696 /*
c893a3ae
RD
2697 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2698 * S.M.A.R.T * and some other commands. We have to ensure that the
2699 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
2700 */
2701
fd3367af 2702 if (speed > XFER_PIO_6) {
452503f9
AC
2703 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2704 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2705 }
2706
2707 /*
c893a3ae 2708 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2709 */
2710
2711 if (t->act8b + t->rec8b < t->cyc8b) {
2712 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2713 t->rec8b = t->cyc8b - t->act8b;
2714 }
2715
2716 if (t->active + t->recover < t->cycle) {
2717 t->active += (t->cycle - (t->active + t->recover)) / 2;
2718 t->recover = t->cycle - t->active;
2719 }
a617c09f 2720
4f701d1e
AC
2721 /* In a few cases quantisation may produce enough errors to
2722 leave t->cycle too low for the sum of active and recovery
2723 if so we must correct this */
2724 if (t->active + t->recover > t->cycle)
2725 t->cycle = t->active + t->recover;
452503f9
AC
2726
2727 return 0;
2728}
2729
cf176e1a
TH
2730/**
2731 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2732 * @dev: Device to adjust xfer masks
458337db 2733 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2734 *
2735 * Adjust xfer masks of @dev downward. Note that this function
2736 * does not apply the change. Invoking ata_set_mode() afterwards
2737 * will apply the limit.
2738 *
2739 * LOCKING:
2740 * Inherited from caller.
2741 *
2742 * RETURNS:
2743 * 0 on success, negative errno on failure
2744 */
458337db 2745int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2746{
458337db
TH
2747 char buf[32];
2748 unsigned int orig_mask, xfer_mask;
2749 unsigned int pio_mask, mwdma_mask, udma_mask;
2750 int quiet, highbit;
cf176e1a 2751
458337db
TH
2752 quiet = !!(sel & ATA_DNXFER_QUIET);
2753 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2754
458337db
TH
2755 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2756 dev->mwdma_mask,
2757 dev->udma_mask);
2758 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2759
458337db
TH
2760 switch (sel) {
2761 case ATA_DNXFER_PIO:
2762 highbit = fls(pio_mask) - 1;
2763 pio_mask &= ~(1 << highbit);
2764 break;
2765
2766 case ATA_DNXFER_DMA:
2767 if (udma_mask) {
2768 highbit = fls(udma_mask) - 1;
2769 udma_mask &= ~(1 << highbit);
2770 if (!udma_mask)
2771 return -ENOENT;
2772 } else if (mwdma_mask) {
2773 highbit = fls(mwdma_mask) - 1;
2774 mwdma_mask &= ~(1 << highbit);
2775 if (!mwdma_mask)
2776 return -ENOENT;
2777 }
2778 break;
2779
2780 case ATA_DNXFER_40C:
2781 udma_mask &= ATA_UDMA_MASK_40C;
2782 break;
2783
2784 case ATA_DNXFER_FORCE_PIO0:
2785 pio_mask &= 1;
2786 case ATA_DNXFER_FORCE_PIO:
2787 mwdma_mask = 0;
2788 udma_mask = 0;
2789 break;
2790
458337db
TH
2791 default:
2792 BUG();
2793 }
2794
2795 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2796
2797 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2798 return -ENOENT;
2799
2800 if (!quiet) {
2801 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2802 snprintf(buf, sizeof(buf), "%s:%s",
2803 ata_mode_string(xfer_mask),
2804 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2805 else
2806 snprintf(buf, sizeof(buf), "%s",
2807 ata_mode_string(xfer_mask));
2808
2809 ata_dev_printk(dev, KERN_WARNING,
2810 "limiting speed to %s\n", buf);
2811 }
cf176e1a
TH
2812
2813 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2814 &dev->udma_mask);
2815
cf176e1a 2816 return 0;
cf176e1a
TH
2817}
2818
3373efd8 2819static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2820{
9af5c9c9 2821 struct ata_eh_context *ehc = &dev->link->eh_context;
83206a29
TH
2822 unsigned int err_mask;
2823 int rc;
1da177e4 2824
e8384607 2825 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2826 if (dev->xfer_shift == ATA_SHIFT_PIO)
2827 dev->flags |= ATA_DFLAG_PIO;
2828
3373efd8 2829 err_mask = ata_dev_set_xfermode(dev);
11750a40
AC
2830 /* Old CFA may refuse this command, which is just fine */
2831 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2832 err_mask &= ~AC_ERR_DEV;
0bc2a79a
AC
2833 /* Some very old devices and some bad newer ones fail any kind of
2834 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2835 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2836 dev->pio_mode <= XFER_PIO_2)
2837 err_mask &= ~AC_ERR_DEV;
83206a29 2838 if (err_mask) {
f15a1daf
TH
2839 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2840 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2841 return -EIO;
2842 }
1da177e4 2843
baa1e78a 2844 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2845 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2846 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2847 if (rc)
83206a29 2848 return rc;
48a8a14f 2849
23e71c3d
TH
2850 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2851 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2852
f15a1daf
TH
2853 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2854 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2855 return 0;
1da177e4
LT
2856}
2857
1da177e4 2858/**
04351821 2859 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2860 * @link: link on which timings will be programmed
e82cbdb9 2861 * @r_failed_dev: out paramter for failed device
1da177e4 2862 *
04351821
AC
2863 * Standard implementation of the function used to tune and set
2864 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2865 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2866 * returned in @r_failed_dev.
780a87f7 2867 *
1da177e4 2868 * LOCKING:
0cba632b 2869 * PCI/etc. bus probe sem.
e82cbdb9
TH
2870 *
2871 * RETURNS:
2872 * 0 on success, negative errno otherwise
1da177e4 2873 */
04351821 2874
0260731f 2875int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 2876{
0260731f 2877 struct ata_port *ap = link->ap;
e8e0619f 2878 struct ata_device *dev;
f58229f8 2879 int rc = 0, used_dma = 0, found = 0;
3adcebb2 2880
a6d5a51c 2881 /* step 1: calculate xfer_mask */
f58229f8 2882 ata_link_for_each_dev(dev, link) {
acf356b1 2883 unsigned int pio_mask, dma_mask;
a6d5a51c 2884
e1211e3f 2885 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2886 continue;
2887
3373efd8 2888 ata_dev_xfermask(dev);
1da177e4 2889
acf356b1
TH
2890 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2891 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2892 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2893 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2894
4f65977d 2895 found = 1;
5444a6f4
AC
2896 if (dev->dma_mode)
2897 used_dma = 1;
a6d5a51c 2898 }
4f65977d 2899 if (!found)
e82cbdb9 2900 goto out;
a6d5a51c
TH
2901
2902 /* step 2: always set host PIO timings */
f58229f8 2903 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2904 if (!ata_dev_enabled(dev))
2905 continue;
2906
2907 if (!dev->pio_mode) {
f15a1daf 2908 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2909 rc = -EINVAL;
e82cbdb9 2910 goto out;
e8e0619f
TH
2911 }
2912
2913 dev->xfer_mode = dev->pio_mode;
2914 dev->xfer_shift = ATA_SHIFT_PIO;
2915 if (ap->ops->set_piomode)
2916 ap->ops->set_piomode(ap, dev);
2917 }
1da177e4 2918
a6d5a51c 2919 /* step 3: set host DMA timings */
f58229f8 2920 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2921 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2922 continue;
2923
2924 dev->xfer_mode = dev->dma_mode;
2925 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2926 if (ap->ops->set_dmamode)
2927 ap->ops->set_dmamode(ap, dev);
2928 }
1da177e4
LT
2929
2930 /* step 4: update devices' xfer mode */
f58229f8 2931 ata_link_for_each_dev(dev, link) {
18d90deb 2932 /* don't update suspended devices' xfer mode */
9666f400 2933 if (!ata_dev_enabled(dev))
83206a29
TH
2934 continue;
2935
3373efd8 2936 rc = ata_dev_set_mode(dev);
5bbc53f4 2937 if (rc)
e82cbdb9 2938 goto out;
83206a29 2939 }
1da177e4 2940
e8e0619f
TH
2941 /* Record simplex status. If we selected DMA then the other
2942 * host channels are not permitted to do so.
5444a6f4 2943 */
cca3974e 2944 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2945 ap->host->simplex_claimed = ap;
5444a6f4 2946
e82cbdb9
TH
2947 out:
2948 if (rc)
2949 *r_failed_dev = dev;
2950 return rc;
1da177e4
LT
2951}
2952
04351821
AC
2953/**
2954 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2955 * @link: link on which timings will be programmed
04351821
AC
2956 * @r_failed_dev: out paramter for failed device
2957 *
2958 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2959 * ata_set_mode() fails, pointer to the failing device is
2960 * returned in @r_failed_dev.
2961 *
2962 * LOCKING:
2963 * PCI/etc. bus probe sem.
2964 *
2965 * RETURNS:
2966 * 0 on success, negative errno otherwise
2967 */
0260731f 2968int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 2969{
0260731f
TH
2970 struct ata_port *ap = link->ap;
2971
04351821
AC
2972 /* has private set_mode? */
2973 if (ap->ops->set_mode)
0260731f
TH
2974 return ap->ops->set_mode(link, r_failed_dev);
2975 return ata_do_set_mode(link, r_failed_dev);
04351821
AC
2976}
2977
1fdffbce
JG
2978/**
2979 * ata_tf_to_host - issue ATA taskfile to host controller
2980 * @ap: port to which command is being issued
2981 * @tf: ATA taskfile register set
2982 *
2983 * Issues ATA taskfile register set to ATA host controller,
2984 * with proper synchronization with interrupt handler and
2985 * other threads.
2986 *
2987 * LOCKING:
cca3974e 2988 * spin_lock_irqsave(host lock)
1fdffbce
JG
2989 */
2990
2991static inline void ata_tf_to_host(struct ata_port *ap,
2992 const struct ata_taskfile *tf)
2993{
2994 ap->ops->tf_load(ap, tf);
2995 ap->ops->exec_command(ap, tf);
2996}
2997
1da177e4
LT
2998/**
2999 * ata_busy_sleep - sleep until BSY clears, or timeout
3000 * @ap: port containing status register to be polled
3001 * @tmout_pat: impatience timeout
3002 * @tmout: overall timeout
3003 *
780a87f7
JG
3004 * Sleep until ATA Status register bit BSY clears,
3005 * or a timeout occurs.
3006 *
d1adc1bb
TH
3007 * LOCKING:
3008 * Kernel thread context (may sleep).
3009 *
3010 * RETURNS:
3011 * 0 on success, -errno otherwise.
1da177e4 3012 */
d1adc1bb
TH
3013int ata_busy_sleep(struct ata_port *ap,
3014 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
3015{
3016 unsigned long timer_start, timeout;
3017 u8 status;
3018
3019 status = ata_busy_wait(ap, ATA_BUSY, 300);
3020 timer_start = jiffies;
3021 timeout = timer_start + tmout_pat;
d1adc1bb
TH
3022 while (status != 0xff && (status & ATA_BUSY) &&
3023 time_before(jiffies, timeout)) {
1da177e4
LT
3024 msleep(50);
3025 status = ata_busy_wait(ap, ATA_BUSY, 3);
3026 }
3027
d1adc1bb 3028 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 3029 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
3030 "port is slow to respond, please be patient "
3031 "(Status 0x%x)\n", status);
1da177e4
LT
3032
3033 timeout = timer_start + tmout;
d1adc1bb
TH
3034 while (status != 0xff && (status & ATA_BUSY) &&
3035 time_before(jiffies, timeout)) {
1da177e4
LT
3036 msleep(50);
3037 status = ata_chk_status(ap);
3038 }
3039
d1adc1bb
TH
3040 if (status == 0xff)
3041 return -ENODEV;
3042
1da177e4 3043 if (status & ATA_BUSY) {
f15a1daf 3044 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3045 "(%lu secs, Status 0x%x)\n",
3046 tmout / HZ, status);
d1adc1bb 3047 return -EBUSY;
1da177e4
LT
3048 }
3049
3050 return 0;
3051}
3052
d4b2bab4
TH
3053/**
3054 * ata_wait_ready - sleep until BSY clears, or timeout
3055 * @ap: port containing status register to be polled
3056 * @deadline: deadline jiffies for the operation
3057 *
3058 * Sleep until ATA Status register bit BSY clears, or timeout
3059 * occurs.
3060 *
3061 * LOCKING:
3062 * Kernel thread context (may sleep).
3063 *
3064 * RETURNS:
3065 * 0 on success, -errno otherwise.
3066 */
3067int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3068{
3069 unsigned long start = jiffies;
3070 int warned = 0;
3071
3072 while (1) {
3073 u8 status = ata_chk_status(ap);
3074 unsigned long now = jiffies;
3075
3076 if (!(status & ATA_BUSY))
3077 return 0;
936fd732 3078 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3079 return -ENODEV;
3080 if (time_after(now, deadline))
3081 return -EBUSY;
3082
3083 if (!warned && time_after(now, start + 5 * HZ) &&
3084 (deadline - now > 3 * HZ)) {
3085 ata_port_printk(ap, KERN_WARNING,
3086 "port is slow to respond, please be patient "
3087 "(Status 0x%x)\n", status);
3088 warned = 1;
3089 }
3090
3091 msleep(50);
3092 }
3093}
3094
3095static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3096 unsigned long deadline)
1da177e4
LT
3097{
3098 struct ata_ioports *ioaddr = &ap->ioaddr;
3099 unsigned int dev0 = devmask & (1 << 0);
3100 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3101 int rc, ret = 0;
1da177e4
LT
3102
3103 /* if device 0 was found in ata_devchk, wait for its
3104 * BSY bit to clear
3105 */
d4b2bab4
TH
3106 if (dev0) {
3107 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3108 if (rc) {
3109 if (rc != -ENODEV)
3110 return rc;
3111 ret = rc;
3112 }
d4b2bab4 3113 }
1da177e4 3114
e141d999
TH
3115 /* if device 1 was found in ata_devchk, wait for register
3116 * access briefly, then wait for BSY to clear.
1da177e4 3117 */
e141d999
TH
3118 if (dev1) {
3119 int i;
1da177e4
LT
3120
3121 ap->ops->dev_select(ap, 1);
e141d999
TH
3122
3123 /* Wait for register access. Some ATAPI devices fail
3124 * to set nsect/lbal after reset, so don't waste too
3125 * much time on it. We're gonna wait for !BSY anyway.
3126 */
3127 for (i = 0; i < 2; i++) {
3128 u8 nsect, lbal;
3129
3130 nsect = ioread8(ioaddr->nsect_addr);
3131 lbal = ioread8(ioaddr->lbal_addr);
3132 if ((nsect == 1) && (lbal == 1))
3133 break;
3134 msleep(50); /* give drive a breather */
3135 }
3136
d4b2bab4 3137 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3138 if (rc) {
3139 if (rc != -ENODEV)
3140 return rc;
3141 ret = rc;
3142 }
d4b2bab4 3143 }
1da177e4
LT
3144
3145 /* is all this really necessary? */
3146 ap->ops->dev_select(ap, 0);
3147 if (dev1)
3148 ap->ops->dev_select(ap, 1);
3149 if (dev0)
3150 ap->ops->dev_select(ap, 0);
d4b2bab4 3151
9b89391c 3152 return ret;
1da177e4
LT
3153}
3154
d4b2bab4
TH
3155static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3156 unsigned long deadline)
1da177e4
LT
3157{
3158 struct ata_ioports *ioaddr = &ap->ioaddr;
3159
44877b4e 3160 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3161
3162 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3163 iowrite8(ap->ctl, ioaddr->ctl_addr);
3164 udelay(20); /* FIXME: flush */
3165 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3166 udelay(20); /* FIXME: flush */
3167 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3168
3169 /* spec mandates ">= 2ms" before checking status.
3170 * We wait 150ms, because that was the magic delay used for
3171 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3172 * between when the ATA command register is written, and then
3173 * status is checked. Because waiting for "a while" before
3174 * checking status is fine, post SRST, we perform this magic
3175 * delay here as well.
09c7ad79
AC
3176 *
3177 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
3178 */
3179 msleep(150);
3180
2e9edbf8 3181 /* Before we perform post reset processing we want to see if
298a41ca
TH
3182 * the bus shows 0xFF because the odd clown forgets the D7
3183 * pulldown resistor.
3184 */
d1adc1bb 3185 if (ata_check_status(ap) == 0xFF)
9b89391c 3186 return -ENODEV;
09c7ad79 3187
d4b2bab4 3188 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3189}
3190
3191/**
3192 * ata_bus_reset - reset host port and associated ATA channel
3193 * @ap: port to reset
3194 *
3195 * This is typically the first time we actually start issuing
3196 * commands to the ATA channel. We wait for BSY to clear, then
3197 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3198 * result. Determine what devices, if any, are on the channel
3199 * by looking at the device 0/1 error register. Look at the signature
3200 * stored in each device's taskfile registers, to determine if
3201 * the device is ATA or ATAPI.
3202 *
3203 * LOCKING:
0cba632b 3204 * PCI/etc. bus probe sem.
cca3974e 3205 * Obtains host lock.
1da177e4
LT
3206 *
3207 * SIDE EFFECTS:
198e0fed 3208 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3209 */
3210
3211void ata_bus_reset(struct ata_port *ap)
3212{
9af5c9c9 3213 struct ata_device *device = ap->link.device;
1da177e4
LT
3214 struct ata_ioports *ioaddr = &ap->ioaddr;
3215 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3216 u8 err;
aec5c3c1 3217 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3218 int rc;
1da177e4 3219
44877b4e 3220 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3221
3222 /* determine if device 0/1 are present */
3223 if (ap->flags & ATA_FLAG_SATA_RESET)
3224 dev0 = 1;
3225 else {
3226 dev0 = ata_devchk(ap, 0);
3227 if (slave_possible)
3228 dev1 = ata_devchk(ap, 1);
3229 }
3230
3231 if (dev0)
3232 devmask |= (1 << 0);
3233 if (dev1)
3234 devmask |= (1 << 1);
3235
3236 /* select device 0 again */
3237 ap->ops->dev_select(ap, 0);
3238
3239 /* issue bus reset */
9b89391c
TH
3240 if (ap->flags & ATA_FLAG_SRST) {
3241 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3242 if (rc && rc != -ENODEV)
aec5c3c1 3243 goto err_out;
9b89391c 3244 }
1da177e4
LT
3245
3246 /*
3247 * determine by signature whether we have ATA or ATAPI devices
3248 */
3f19859e 3249 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
1da177e4 3250 if ((slave_possible) && (err != 0x81))
3f19859e 3251 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
1da177e4 3252
1da177e4 3253 /* is double-select really necessary? */
9af5c9c9 3254 if (device[1].class != ATA_DEV_NONE)
1da177e4 3255 ap->ops->dev_select(ap, 1);
9af5c9c9 3256 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3257 ap->ops->dev_select(ap, 0);
3258
3259 /* if no devices were detected, disable this port */
9af5c9c9
TH
3260 if ((device[0].class == ATA_DEV_NONE) &&
3261 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3262 goto err_out;
3263
3264 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3265 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3266 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3267 }
3268
3269 DPRINTK("EXIT\n");
3270 return;
3271
3272err_out:
f15a1daf 3273 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3274 ata_port_disable(ap);
1da177e4
LT
3275
3276 DPRINTK("EXIT\n");
3277}
3278
d7bb4cc7 3279/**
936fd732
TH
3280 * sata_link_debounce - debounce SATA phy status
3281 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3282 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3283 * @deadline: deadline jiffies for the operation
d7bb4cc7 3284 *
936fd732 3285* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3286 * holding the same value where DET is not 1 for @duration polled
3287 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3288 * beginning of the stable state. Because DET gets stuck at 1 on
3289 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3290 * until timeout then returns 0 if DET is stable at 1.
3291 *
d4b2bab4
TH
3292 * @timeout is further limited by @deadline. The sooner of the
3293 * two is used.
3294 *
d7bb4cc7
TH
3295 * LOCKING:
3296 * Kernel thread context (may sleep)
3297 *
3298 * RETURNS:
3299 * 0 on success, -errno on failure.
3300 */
936fd732
TH
3301int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3302 unsigned long deadline)
7a7921e8 3303{
d7bb4cc7 3304 unsigned long interval_msec = params[0];
d4b2bab4
TH
3305 unsigned long duration = msecs_to_jiffies(params[1]);
3306 unsigned long last_jiffies, t;
d7bb4cc7
TH
3307 u32 last, cur;
3308 int rc;
3309
d4b2bab4
TH
3310 t = jiffies + msecs_to_jiffies(params[2]);
3311 if (time_before(t, deadline))
3312 deadline = t;
3313
936fd732 3314 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3315 return rc;
3316 cur &= 0xf;
3317
3318 last = cur;
3319 last_jiffies = jiffies;
3320
3321 while (1) {
3322 msleep(interval_msec);
936fd732 3323 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3324 return rc;
3325 cur &= 0xf;
3326
3327 /* DET stable? */
3328 if (cur == last) {
d4b2bab4 3329 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3330 continue;
3331 if (time_after(jiffies, last_jiffies + duration))
3332 return 0;
3333 continue;
3334 }
3335
3336 /* unstable, start over */
3337 last = cur;
3338 last_jiffies = jiffies;
3339
f1545154
TH
3340 /* Check deadline. If debouncing failed, return
3341 * -EPIPE to tell upper layer to lower link speed.
3342 */
d4b2bab4 3343 if (time_after(jiffies, deadline))
f1545154 3344 return -EPIPE;
d7bb4cc7
TH
3345 }
3346}
3347
3348/**
936fd732
TH
3349 * sata_link_resume - resume SATA link
3350 * @link: ATA link to resume SATA
d7bb4cc7 3351 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3352 * @deadline: deadline jiffies for the operation
d7bb4cc7 3353 *
936fd732 3354 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3355 *
3356 * LOCKING:
3357 * Kernel thread context (may sleep)
3358 *
3359 * RETURNS:
3360 * 0 on success, -errno on failure.
3361 */
936fd732
TH
3362int sata_link_resume(struct ata_link *link, const unsigned long *params,
3363 unsigned long deadline)
d7bb4cc7
TH
3364{
3365 u32 scontrol;
81952c54
TH
3366 int rc;
3367
936fd732 3368 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3369 return rc;
7a7921e8 3370
852ee16a 3371 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3372
936fd732 3373 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3374 return rc;
7a7921e8 3375
d7bb4cc7
TH
3376 /* Some PHYs react badly if SStatus is pounded immediately
3377 * after resuming. Delay 200ms before debouncing.
3378 */
3379 msleep(200);
7a7921e8 3380
936fd732 3381 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3382}
3383
f5914a46
TH
3384/**
3385 * ata_std_prereset - prepare for reset
cc0680a5 3386 * @link: ATA link to be reset
d4b2bab4 3387 * @deadline: deadline jiffies for the operation
f5914a46 3388 *
cc0680a5 3389 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3390 * prereset makes libata abort whole reset sequence and give up
3391 * that port, so prereset should be best-effort. It does its
3392 * best to prepare for reset sequence but if things go wrong, it
3393 * should just whine, not fail.
f5914a46
TH
3394 *
3395 * LOCKING:
3396 * Kernel thread context (may sleep)
3397 *
3398 * RETURNS:
3399 * 0 on success, -errno otherwise.
3400 */
cc0680a5 3401int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3402{
cc0680a5 3403 struct ata_port *ap = link->ap;
936fd732 3404 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3405 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3406 int rc;
3407
31daabda 3408 /* handle link resume */
28324304 3409 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
0c88758b 3410 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
28324304
TH
3411 ehc->i.action |= ATA_EH_HARDRESET;
3412
f5914a46
TH
3413 /* if we're about to do hardreset, nothing more to do */
3414 if (ehc->i.action & ATA_EH_HARDRESET)
3415 return 0;
3416
936fd732 3417 /* if SATA, resume link */
a16abc0b 3418 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3419 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3420 /* whine about phy resume failure but proceed */
3421 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3422 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3423 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3424 }
3425
3426 /* Wait for !BSY if the controller can wait for the first D2H
3427 * Reg FIS and we don't know that no device is attached.
3428 */
0c88758b 3429 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
b8cffc6a 3430 rc = ata_wait_ready(ap, deadline);
6dffaf61 3431 if (rc && rc != -ENODEV) {
cc0680a5 3432 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
3433 "(errno=%d), forcing hardreset\n", rc);
3434 ehc->i.action |= ATA_EH_HARDRESET;
3435 }
3436 }
f5914a46
TH
3437
3438 return 0;
3439}
3440
c2bd5804
TH
3441/**
3442 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 3443 * @link: ATA link to reset
c2bd5804 3444 * @classes: resulting classes of attached devices
d4b2bab4 3445 * @deadline: deadline jiffies for the operation
c2bd5804 3446 *
52783c5d 3447 * Reset host port using ATA SRST.
c2bd5804
TH
3448 *
3449 * LOCKING:
3450 * Kernel thread context (may sleep)
3451 *
3452 * RETURNS:
3453 * 0 on success, -errno otherwise.
3454 */
cc0680a5 3455int ata_std_softreset(struct ata_link *link, unsigned int *classes,
d4b2bab4 3456 unsigned long deadline)
c2bd5804 3457{
cc0680a5 3458 struct ata_port *ap = link->ap;
c2bd5804 3459 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3460 unsigned int devmask = 0;
3461 int rc;
c2bd5804
TH
3462 u8 err;
3463
3464 DPRINTK("ENTER\n");
3465
936fd732 3466 if (ata_link_offline(link)) {
3a39746a
TH
3467 classes[0] = ATA_DEV_NONE;
3468 goto out;
3469 }
3470
c2bd5804
TH
3471 /* determine if device 0/1 are present */
3472 if (ata_devchk(ap, 0))
3473 devmask |= (1 << 0);
3474 if (slave_possible && ata_devchk(ap, 1))
3475 devmask |= (1 << 1);
3476
c2bd5804
TH
3477 /* select device 0 again */
3478 ap->ops->dev_select(ap, 0);
3479
3480 /* issue bus reset */
3481 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3482 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c 3483 /* if link is occupied, -ENODEV too is an error */
936fd732 3484 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
cc0680a5 3485 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
d4b2bab4 3486 return rc;
c2bd5804
TH
3487 }
3488
3489 /* determine by signature whether we have ATA or ATAPI devices */
3f19859e
TH
3490 classes[0] = ata_dev_try_classify(&link->device[0],
3491 devmask & (1 << 0), &err);
c2bd5804 3492 if (slave_possible && err != 0x81)
3f19859e
TH
3493 classes[1] = ata_dev_try_classify(&link->device[1],
3494 devmask & (1 << 1), &err);
c2bd5804 3495
3a39746a 3496 out:
c2bd5804
TH
3497 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3498 return 0;
3499}
3500
3501/**
cc0680a5
TH
3502 * sata_link_hardreset - reset link via SATA phy reset
3503 * @link: link to reset
b6103f6d 3504 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3505 * @deadline: deadline jiffies for the operation
c2bd5804 3506 *
cc0680a5 3507 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
3508 *
3509 * LOCKING:
3510 * Kernel thread context (may sleep)
3511 *
3512 * RETURNS:
3513 * 0 on success, -errno otherwise.
3514 */
cc0680a5 3515int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 3516 unsigned long deadline)
c2bd5804 3517{
852ee16a 3518 u32 scontrol;
81952c54 3519 int rc;
852ee16a 3520
c2bd5804
TH
3521 DPRINTK("ENTER\n");
3522
936fd732 3523 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3524 /* SATA spec says nothing about how to reconfigure
3525 * spd. To be on the safe side, turn off phy during
3526 * reconfiguration. This works for at least ICH7 AHCI
3527 * and Sil3124.
3528 */
936fd732 3529 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3530 goto out;
81952c54 3531
a34b6fc0 3532 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3533
936fd732 3534 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3535 goto out;
1c3fae4d 3536
936fd732 3537 sata_set_spd(link);
1c3fae4d
TH
3538 }
3539
3540 /* issue phy wake/reset */
936fd732 3541 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3542 goto out;
81952c54 3543
852ee16a 3544 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3545
936fd732 3546 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3547 goto out;
c2bd5804 3548
1c3fae4d 3549 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3550 * 10.4.2 says at least 1 ms.
3551 */
3552 msleep(1);
3553
936fd732
TH
3554 /* bring link back */
3555 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
3556 out:
3557 DPRINTK("EXIT, rc=%d\n", rc);
3558 return rc;
3559}
3560
3561/**
3562 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 3563 * @link: link to reset
b6103f6d 3564 * @class: resulting class of attached device
d4b2bab4 3565 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3566 *
3567 * SATA phy-reset host port using DET bits of SControl register,
3568 * wait for !BSY and classify the attached device.
3569 *
3570 * LOCKING:
3571 * Kernel thread context (may sleep)
3572 *
3573 * RETURNS:
3574 * 0 on success, -errno otherwise.
3575 */
cc0680a5 3576int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 3577 unsigned long deadline)
b6103f6d 3578{
cc0680a5 3579 struct ata_port *ap = link->ap;
936fd732 3580 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
3581 int rc;
3582
3583 DPRINTK("ENTER\n");
3584
3585 /* do hardreset */
cc0680a5 3586 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 3587 if (rc) {
cc0680a5 3588 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
3589 "COMRESET failed (errno=%d)\n", rc);
3590 return rc;
3591 }
c2bd5804 3592
c2bd5804 3593 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 3594 if (ata_link_offline(link)) {
c2bd5804
TH
3595 *class = ATA_DEV_NONE;
3596 DPRINTK("EXIT, link offline\n");
3597 return 0;
3598 }
3599
34fee227
TH
3600 /* wait a while before checking status, see SRST for more info */
3601 msleep(150);
3602
d4b2bab4 3603 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3604 /* link occupied, -ENODEV too is an error */
3605 if (rc) {
cc0680a5 3606 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
3607 "COMRESET failed (errno=%d)\n", rc);
3608 return rc;
c2bd5804
TH
3609 }
3610
3a39746a
TH
3611 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3612
3f19859e 3613 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
3614
3615 DPRINTK("EXIT, class=%u\n", *class);
3616 return 0;
3617}
3618
3619/**
3620 * ata_std_postreset - standard postreset callback
cc0680a5 3621 * @link: the target ata_link
c2bd5804
TH
3622 * @classes: classes of attached devices
3623 *
3624 * This function is invoked after a successful reset. Note that
3625 * the device might have been reset more than once using
3626 * different reset methods before postreset is invoked.
c2bd5804 3627 *
c2bd5804
TH
3628 * LOCKING:
3629 * Kernel thread context (may sleep)
3630 */
cc0680a5 3631void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3632{
cc0680a5 3633 struct ata_port *ap = link->ap;
dc2b3515
TH
3634 u32 serror;
3635
c2bd5804
TH
3636 DPRINTK("ENTER\n");
3637
c2bd5804 3638 /* print link status */
936fd732 3639 sata_print_link_status(link);
c2bd5804 3640
dc2b3515 3641 /* clear SError */
936fd732
TH
3642 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3643 sata_scr_write(link, SCR_ERROR, serror);
dc2b3515 3644
c2bd5804
TH
3645 /* is double-select really necessary? */
3646 if (classes[0] != ATA_DEV_NONE)
3647 ap->ops->dev_select(ap, 1);
3648 if (classes[1] != ATA_DEV_NONE)
3649 ap->ops->dev_select(ap, 0);
3650
3a39746a
TH
3651 /* bail out if no device is present */
3652 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3653 DPRINTK("EXIT, no device\n");
3654 return;
3655 }
3656
3657 /* set up device control */
0d5ff566
TH
3658 if (ap->ioaddr.ctl_addr)
3659 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3660
3661 DPRINTK("EXIT\n");
3662}
3663
623a3128
TH
3664/**
3665 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3666 * @dev: device to compare against
3667 * @new_class: class of the new device
3668 * @new_id: IDENTIFY page of the new device
3669 *
3670 * Compare @new_class and @new_id against @dev and determine
3671 * whether @dev is the device indicated by @new_class and
3672 * @new_id.
3673 *
3674 * LOCKING:
3675 * None.
3676 *
3677 * RETURNS:
3678 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3679 */
3373efd8
TH
3680static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3681 const u16 *new_id)
623a3128
TH
3682{
3683 const u16 *old_id = dev->id;
a0cf733b
TH
3684 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3685 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3686
3687 if (dev->class != new_class) {
f15a1daf
TH
3688 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3689 dev->class, new_class);
623a3128
TH
3690 return 0;
3691 }
3692
a0cf733b
TH
3693 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3694 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3695 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3696 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3697
3698 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3699 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3700 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3701 return 0;
3702 }
3703
3704 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3705 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3706 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3707 return 0;
3708 }
3709
623a3128
TH
3710 return 1;
3711}
3712
3713/**
fe30911b 3714 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3715 * @dev: target ATA device
bff04647 3716 * @readid_flags: read ID flags
623a3128
TH
3717 *
3718 * Re-read IDENTIFY page and make sure @dev is still attached to
3719 * the port.
3720 *
3721 * LOCKING:
3722 * Kernel thread context (may sleep)
3723 *
3724 * RETURNS:
3725 * 0 on success, negative errno otherwise
3726 */
fe30911b 3727int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3728{
5eb45c02 3729 unsigned int class = dev->class;
9af5c9c9 3730 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3731 int rc;
3732
fe635c7e 3733 /* read ID data */
bff04647 3734 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3735 if (rc)
fe30911b 3736 return rc;
623a3128
TH
3737
3738 /* is the device still there? */
fe30911b
TH
3739 if (!ata_dev_same_device(dev, class, id))
3740 return -ENODEV;
623a3128 3741
fe635c7e 3742 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3743 return 0;
3744}
3745
3746/**
3747 * ata_dev_revalidate - Revalidate ATA device
3748 * @dev: device to revalidate
3749 * @readid_flags: read ID flags
3750 *
3751 * Re-read IDENTIFY page, make sure @dev is still attached to the
3752 * port and reconfigure it according to the new IDENTIFY page.
3753 *
3754 * LOCKING:
3755 * Kernel thread context (may sleep)
3756 *
3757 * RETURNS:
3758 * 0 on success, negative errno otherwise
3759 */
3760int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3761{
6ddcd3b0 3762 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3763 int rc;
3764
3765 if (!ata_dev_enabled(dev))
3766 return -ENODEV;
3767
3768 /* re-read ID */
3769 rc = ata_dev_reread_id(dev, readid_flags);
3770 if (rc)
3771 goto fail;
623a3128
TH
3772
3773 /* configure device according to the new ID */
efdaedc4 3774 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3775 if (rc)
3776 goto fail;
3777
3778 /* verify n_sectors hasn't changed */
b54eebd6
TH
3779 if (dev->class == ATA_DEV_ATA && n_sectors &&
3780 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
3781 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3782 "%llu != %llu\n",
3783 (unsigned long long)n_sectors,
3784 (unsigned long long)dev->n_sectors);
8270bec4
TH
3785
3786 /* restore original n_sectors */
3787 dev->n_sectors = n_sectors;
3788
6ddcd3b0
TH
3789 rc = -ENODEV;
3790 goto fail;
3791 }
3792
3793 return 0;
623a3128
TH
3794
3795 fail:
f15a1daf 3796 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3797 return rc;
3798}
3799
6919a0a6
AC
3800struct ata_blacklist_entry {
3801 const char *model_num;
3802 const char *model_rev;
3803 unsigned long horkage;
3804};
3805
3806static const struct ata_blacklist_entry ata_device_blacklist [] = {
3807 /* Devices with DMA related problems under Linux */
3808 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3809 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3810 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3811 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3812 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3813 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3814 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3815 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3816 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3817 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3818 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3819 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3820 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3821 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3822 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3823 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3824 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3825 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3826 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3827 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3828 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3829 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3830 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3831 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3832 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3833 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3834 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3835 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3836 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
39f19886 3837 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
5acd50f6 3838 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
39ce7128
TH
3839 { "IOMEGA ZIP 250 ATAPI Floppy",
3840 NULL, ATA_HORKAGE_NODMA },
6919a0a6 3841
18d6e9d5 3842 /* Weird ATAPI devices */
40a1d531 3843 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 3844
6919a0a6
AC
3845 /* Devices we expect to fail diagnostics */
3846
3847 /* Devices where NCQ should be avoided */
3848 /* NCQ is slow */
3849 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3850 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3851 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3852 /* NCQ is broken */
539cc7c7 3853 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3854 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
2f8d90ab 3855 { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
539cc7c7
JG
3856 ATA_HORKAGE_NONCQ },
3857
36e337d0
RH
3858 /* Blacklist entries taken from Silicon Image 3124/3132
3859 Windows driver .inf file - also several Linux problem reports */
3860 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3861 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3862 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
bd9c5a39
TH
3863 /* Drives which do spurious command completion */
3864 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2f8fcebb 3865 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
e14cbfa6 3866 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
2f8fcebb 3867 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
a520f261 3868 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3fb6589c 3869 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
0e3dbc01 3870 { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, },
5d6aca8d 3871 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
6919a0a6 3872
16c55b03
TH
3873 /* devices which puke on READ_NATIVE_MAX */
3874 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3875 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3876 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3877 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6
AC
3878
3879 /* End Marker */
3880 { }
1da177e4 3881};
2e9edbf8 3882
/**
 *	strn_pattern_cmp - match a name against a pattern with trailing wildcard
 *	@patt: pattern string, optionally ending in @wildchar
 *	@name: string to match against @patt
 *	@wildchar: wildcard character ('*' for the blacklist tables)
 *
 *	If @patt ends in @wildchar, only the prefix before the wildcard
 *	has to match @name.  Otherwise the two strings must match
 *	exactly.  The previous implementation compared only
 *	strlen(@name) characters in the exact-match case, so a @name
 *	that was a strict prefix of @patt (e.g. model "WDC AC1" vs
 *	pattern "WDC AC11000H") matched spuriously, applying horkage
 *	flags to the wrong device; a full strcmp() closes that hole
 *	without changing any other case.
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strcmp-style).
 */
int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;

	/*
	 * check for trailing wildcard: *\0 -> prefix match only
	 */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		return strncmp(patt, name, p - patt);

	/* no trailing wildcard: require an exact match */
	return strcmp(patt, name);
}
3899
75683fe7 3900static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 3901{
8bfa79fc
TH
3902 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3903 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3904 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3905
8bfa79fc
TH
3906 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3907 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3908
6919a0a6 3909 while (ad->model_num) {
539cc7c7 3910 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
3911 if (ad->model_rev == NULL)
3912 return ad->horkage;
539cc7c7 3913 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 3914 return ad->horkage;
f4b15fef 3915 }
6919a0a6 3916 ad++;
f4b15fef 3917 }
1da177e4
LT
3918 return 0;
3919}
3920
6919a0a6
AC
3921static int ata_dma_blacklisted(const struct ata_device *dev)
3922{
3923 /* We don't support polling DMA.
3924 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3925 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3926 */
9af5c9c9 3927 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
3928 (dev->flags & ATA_DFLAG_CDB_INTR))
3929 return 1;
75683fe7 3930 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
3931}
3932
a6d5a51c
TH
3933/**
3934 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3935 * @dev: Device to compute xfermask for
3936 *
acf356b1
TH
3937 * Compute supported xfermask of @dev and store it in
3938 * dev->*_mask. This function is responsible for applying all
3939 * known limits including host controller limits, device
3940 * blacklist, etc...
a6d5a51c
TH
3941 *
3942 * LOCKING:
3943 * None.
a6d5a51c 3944 */
3373efd8 3945static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3946{
9af5c9c9
TH
3947 struct ata_link *link = dev->link;
3948 struct ata_port *ap = link->ap;
cca3974e 3949 struct ata_host *host = ap->host;
a6d5a51c 3950 unsigned long xfer_mask;
1da177e4 3951
37deecb5 3952 /* controller modes available */
565083e1
TH
3953 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3954 ap->mwdma_mask, ap->udma_mask);
3955
8343f889 3956 /* drive modes available */
37deecb5
TH
3957 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3958 dev->mwdma_mask, dev->udma_mask);
3959 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3960
b352e57d
AC
3961 /*
3962 * CFA Advanced TrueIDE timings are not allowed on a shared
3963 * cable
3964 */
3965 if (ata_dev_pair(dev)) {
3966 /* No PIO5 or PIO6 */
3967 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3968 /* No MWDMA3 or MWDMA 4 */
3969 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3970 }
3971
37deecb5
TH
3972 if (ata_dma_blacklisted(dev)) {
3973 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3974 ata_dev_printk(dev, KERN_WARNING,
3975 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3976 }
a6d5a51c 3977
14d66ab7
PV
3978 if ((host->flags & ATA_HOST_SIMPLEX) &&
3979 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3980 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3981 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3982 "other device, disabling DMA\n");
5444a6f4 3983 }
565083e1 3984
e424675f
JG
3985 if (ap->flags & ATA_FLAG_NO_IORDY)
3986 xfer_mask &= ata_pio_mask_no_iordy(dev);
3987
5444a6f4 3988 if (ap->ops->mode_filter)
a76b62ca 3989 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 3990
8343f889
RH
3991 /* Apply cable rule here. Don't apply it early because when
3992 * we handle hot plug the cable type can itself change.
3993 * Check this last so that we know if the transfer rate was
3994 * solely limited by the cable.
3995 * Unknown or 80 wire cables reported host side are checked
3996 * drive side as well. Cases where we know a 40wire cable
3997 * is used safely for 80 are not checked here.
3998 */
3999 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4000 /* UDMA/44 or higher would be available */
4001 if((ap->cbl == ATA_CBL_PATA40) ||
4002 (ata_drive_40wire(dev->id) &&
4003 (ap->cbl == ATA_CBL_PATA_UNK ||
4004 ap->cbl == ATA_CBL_PATA80))) {
4005 ata_dev_printk(dev, KERN_WARNING,
4006 "limited to UDMA/33 due to 40-wire cable\n");
4007 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4008 }
4009
565083e1
TH
4010 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4011 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4012}
4013
1da177e4
LT
4014/**
4015 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4016 * @dev: Device to which command will be sent
4017 *
780a87f7
JG
4018 * Issue SET FEATURES - XFER MODE command to device @dev
4019 * on port @ap.
4020 *
1da177e4 4021 * LOCKING:
0cba632b 4022 * PCI/etc. bus probe sem.
83206a29
TH
4023 *
4024 * RETURNS:
4025 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4026 */
4027
3373efd8 4028static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4029{
a0123703 4030 struct ata_taskfile tf;
83206a29 4031 unsigned int err_mask;
1da177e4
LT
4032
4033 /* set up set-features taskfile */
4034 DPRINTK("set features - xfer mode\n");
4035
464cf177
TH
4036 /* Some controllers and ATAPI devices show flaky interrupt
4037 * behavior after setting xfer mode. Use polling instead.
4038 */
3373efd8 4039 ata_tf_init(dev, &tf);
a0123703
TH
4040 tf.command = ATA_CMD_SET_FEATURES;
4041 tf.feature = SETFEATURES_XFER;
464cf177 4042 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4043 tf.protocol = ATA_PROT_NODATA;
4044 tf.nsect = dev->xfer_mode;
1da177e4 4045
3373efd8 4046 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9f45cbd3
KCA
4047
4048 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4049 return err_mask;
4050}
4051
4052/**
4053 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4054 * @dev: Device to which command will be sent
4055 * @enable: Whether to enable or disable the feature
4056 *
4057 * Issue SET FEATURES - SATA FEATURES command to device @dev
4058 * on port @ap with sector count set to indicate Asynchronous
4059 * Notification feature
4060 *
4061 * LOCKING:
4062 * PCI/etc. bus probe sem.
4063 *
4064 * RETURNS:
4065 * 0 on success, AC_ERR_* mask otherwise.
4066 */
4067static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4068{
4069 struct ata_taskfile tf;
4070 unsigned int err_mask;
4071
4072 /* set up set-features taskfile */
4073 DPRINTK("set features - SATA features\n");
4074
4075 ata_tf_init(dev, &tf);
4076 tf.command = ATA_CMD_SET_FEATURES;
4077 tf.feature = enable;
4078 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4079 tf.protocol = ATA_PROT_NODATA;
4080 tf.nsect = SATA_AN;
4081
4082 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 4083
83206a29
TH
4084 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4085 return err_mask;
1da177e4
LT
4086}
4087
8bf62ece
AL
4088/**
4089 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4090 * @dev: Device to which command will be sent
e2a7f77a
RD
4091 * @heads: Number of heads (taskfile parameter)
4092 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4093 *
4094 * LOCKING:
6aff8f1f
TH
4095 * Kernel thread context (may sleep)
4096 *
4097 * RETURNS:
4098 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4099 */
3373efd8
TH
4100static unsigned int ata_dev_init_params(struct ata_device *dev,
4101 u16 heads, u16 sectors)
8bf62ece 4102{
a0123703 4103 struct ata_taskfile tf;
6aff8f1f 4104 unsigned int err_mask;
8bf62ece
AL
4105
4106 /* Number of sectors per track 1-255. Number of heads 1-16 */
4107 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4108 return AC_ERR_INVALID;
8bf62ece
AL
4109
4110 /* set up init dev params taskfile */
4111 DPRINTK("init dev params \n");
4112
3373efd8 4113 ata_tf_init(dev, &tf);
a0123703
TH
4114 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4115 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4116 tf.protocol = ATA_PROT_NODATA;
4117 tf.nsect = sectors;
4118 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4119
3373efd8 4120 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
18b2466c
AC
4121 /* A clean abort indicates an original or just out of spec drive
4122 and we should continue as we issue the setup based on the
4123 drive reported working geometry */
4124 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4125 err_mask = 0;
8bf62ece 4126
6aff8f1f
TH
4127 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4128 return err_mask;
8bf62ece
AL
4129}
4130
1da177e4 4131/**
0cba632b
JG
4132 * ata_sg_clean - Unmap DMA memory associated with command
4133 * @qc: Command containing DMA memory to be released
4134 *
4135 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4136 *
4137 * LOCKING:
cca3974e 4138 * spin_lock_irqsave(host lock)
1da177e4 4139 */
70e6ad0c 4140void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4141{
4142 struct ata_port *ap = qc->ap;
cedc9a47 4143 struct scatterlist *sg = qc->__sg;
1da177e4 4144 int dir = qc->dma_dir;
cedc9a47 4145 void *pad_buf = NULL;
1da177e4 4146
a4631474
TH
4147 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4148 WARN_ON(sg == NULL);
1da177e4
LT
4149
4150 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4151 WARN_ON(qc->n_elem > 1);
1da177e4 4152
2c13b7ce 4153 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4154
cedc9a47
JG
4155 /* if we padded the buffer out to 32-bit bound, and data
4156 * xfer direction is from-device, we must copy from the
4157 * pad buffer back into the supplied buffer
4158 */
4159 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4160 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4161
4162 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4163 if (qc->n_elem)
2f1f610b 4164 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
4165 /* restore last sg */
4166 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4167 if (pad_buf) {
4168 struct scatterlist *psg = &qc->pad_sgent;
4169 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4170 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4171 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4172 }
4173 } else {
2e242fa9 4174 if (qc->n_elem)
2f1f610b 4175 dma_unmap_single(ap->dev,
e1410f2d
JG
4176 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4177 dir);
cedc9a47
JG
4178 /* restore sg */
4179 sg->length += qc->pad_len;
4180 if (pad_buf)
4181 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4182 pad_buf, qc->pad_len);
4183 }
1da177e4
LT
4184
4185 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4186 qc->__sg = NULL;
1da177e4
LT
4187}
4188
4189/**
4190 * ata_fill_sg - Fill PCI IDE PRD table
4191 * @qc: Metadata associated with taskfile to be transferred
4192 *
780a87f7
JG
4193 * Fill PCI IDE PRD (scatter-gather) table with segments
4194 * associated with the current disk command.
4195 *
1da177e4 4196 * LOCKING:
cca3974e 4197 * spin_lock_irqsave(host lock)
1da177e4
LT
4198 *
4199 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg entry into PRDs that never cross a
		 * 64K boundary; a length of 0x0000 encodes 64K */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
b9a4197e 4240
d26fc955
AC
4241/**
4242 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4243 * @qc: Metadata associated with taskfile to be transferred
4244 *
4245 * Fill PCI IDE PRD (scatter-gather) table with segments
4246 * associated with the current disk command. Perform the fill
4247 * so that we avoid writing any length 64K records for
4248 * controllers that don't follow the spec.
4249 *
4250 * LOCKING:
4251 * spin_lock_irqsave(host lock)
4252 *
4253 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says.
				   Split the 64K chunk into two 32K PRD entries. */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4302
1da177e4
LT
4303/**
4304 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4305 * @qc: Metadata associated with taskfile to check
4306 *
780a87f7
JG
4307 * Allow low-level driver to filter ATA PACKET commands, returning
4308 * a status indicating whether or not it is OK to use DMA for the
4309 * supplied PACKET command.
4310 *
1da177e4 4311 * LOCKING:
cca3974e 4312 * spin_lock_irqsave(host lock)
0cba632b 4313 *
1da177e4
LT
4314 * RETURNS: 0 when ATAPI DMA can be used
4315 * nonzero otherwise
4316 */
4317int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4318{
4319 struct ata_port *ap = qc->ap;
b9a4197e
TH
4320
4321 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4322 * few ATAPI devices choke on such DMA requests.
4323 */
4324 if (unlikely(qc->nbytes & 15))
4325 return 1;
6f23a31d 4326
1da177e4 4327 if (ap->ops->check_atapi_dma)
b9a4197e 4328 return ap->ops->check_atapi_dma(qc);
1da177e4 4329
b9a4197e 4330 return 0;
1da177e4 4331}
b9a4197e 4332
1da177e4
LT
4333/**
4334 * ata_qc_prep - Prepare taskfile for submission
4335 * @qc: Metadata associated with taskfile to be prepared
4336 *
780a87f7
JG
4337 * Prepare ATA taskfile for submission.
4338 *
1da177e4 4339 * LOCKING:
cca3974e 4340 * spin_lock_irqsave(host lock)
1da177e4
LT
4341 */
4342void ata_qc_prep(struct ata_queued_cmd *qc)
4343{
4344 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4345 return;
4346
4347 ata_fill_sg(qc);
4348}
4349
d26fc955
AC
4350/**
4351 * ata_dumb_qc_prep - Prepare taskfile for submission
4352 * @qc: Metadata associated with taskfile to be prepared
4353 *
4354 * Prepare ATA taskfile for submission.
4355 *
4356 * LOCKING:
4357 * spin_lock_irqsave(host lock)
4358 */
4359void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4360{
4361 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4362 return;
4363
4364 ata_fill_sg_dumb(qc);
4365}
4366
e46834cd
BK
/* No-op ->qc_prep for controllers that need no PRD/S-G setup. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
}
4368
0cba632b
JG
4369/**
4370 * ata_sg_init_one - Associate command with memory buffer
4371 * @qc: Command to be associated
4372 * @buf: Memory buffer
4373 * @buflen: Length of memory buffer, in bytes.
4374 *
4375 * Initialize the data-related elements of queued_cmd @qc
4376 * to point to a single memory buffer, @buf of byte length @buflen.
4377 *
4378 * LOCKING:
cca3974e 4379 * spin_lock_irqsave(host lock)
0cba632b
JG
4380 */
4381
1da177e4
LT
4382void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4383{
1da177e4
LT
4384 qc->flags |= ATA_QCFLAG_SINGLE;
4385
cedc9a47 4386 qc->__sg = &qc->sgent;
1da177e4 4387 qc->n_elem = 1;
cedc9a47 4388 qc->orig_n_elem = 1;
1da177e4 4389 qc->buf_virt = buf;
233277ca 4390 qc->nbytes = buflen;
1da177e4 4391
61c0596c 4392 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4393}
4394
0cba632b
JG
4395/**
4396 * ata_sg_init - Associate command with scatter-gather table.
4397 * @qc: Command to be associated
4398 * @sg: Scatter-gather table.
4399 * @n_elem: Number of elements in s/g table.
4400 *
4401 * Initialize the data-related elements of queued_cmd @qc
4402 * to point to a scatter-gather table @sg, containing @n_elem
4403 * elements.
4404 *
4405 * LOCKING:
cca3974e 4406 * spin_lock_irqsave(host lock)
0cba632b
JG
4407 */
4408
1da177e4
LT
4409void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4410 unsigned int n_elem)
4411{
4412 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4413 qc->__sg = sg;
1da177e4 4414 qc->n_elem = n_elem;
cedc9a47 4415 qc->orig_n_elem = n_elem;
1da177e4
LT
4416}
4417
4418/**
0cba632b
JG
4419 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4420 * @qc: Command with memory buffer to be mapped.
4421 *
4422 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4423 *
4424 * LOCKING:
cca3974e 4425 * spin_lock_irqsave(host lock)
1da177e4
LT
4426 *
4427 * RETURNS:
0cba632b 4428 * Zero on success, negative on error.
1da177e4
LT
4429 */
4430
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* padding is only ever needed for ATAPI commands */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, stage the trailing bytes into the pad buffer */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg: the pad sgent now covers the tail bytes */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* buffer fully consumed by the pad entry: nothing to map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg so the caller sees the original length */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
4486
4487/**
0cba632b
JG
4488 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4489 * @qc: Command with scatter-gather table to be mapped.
4490 *
4491 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4492 *
4493 * LOCKING:
cca3974e 4494 * spin_lock_irqsave(host lock)
1da177e4
LT
4495 *
4496 * RETURNS:
0cba632b 4497 * Zero on success, negative on error.
1da177e4
LT
4498 *
4499 */
4500
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* padding is only ever needed for ATAPI commands */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for writes, stage the trailing bytes into the pad buffer */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg: the pad sgent now covers the tail bytes */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the last entry if the pad sgent fully replaced it */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg so the caller sees the original length */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
4571
0baab86b 4572/**
c893a3ae 4573 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4574 * @buf: Buffer to swap
4575 * @buf_words: Number of 16-bit words in buffer.
4576 *
4577 * Swap halves of 16-bit words if needed to convert from
4578 * little-endian byte order to native cpu byte order, or
4579 * vice-versa.
4580 *
4581 * LOCKING:
6f0ef4fa 4582 * Inherited from caller.
0baab86b 4583 */
1da177e4
LT
4584void swap_buf_le16(u16 *buf, unsigned int buf_words)
4585{
4586#ifdef __BIG_ENDIAN
4587 unsigned int i;
4588
4589 for (i = 0; i < buf_words; i++)
4590 buf[i] = le16_to_cpu(buf[i]);
4591#endif /* __BIG_ENDIAN */
4592}
4593
6ae4cfb5 4594/**
0d5ff566 4595 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4596 * @adev: device to target
6ae4cfb5
AL
4597 * @buf: data buffer
4598 * @buflen: buffer length
344babaa 4599 * @write_data: read/write
6ae4cfb5
AL
4600 *
4601 * Transfer data from/to the device data register by PIO.
4602 *
4603 * LOCKING:
4604 * Inherited from caller.
6ae4cfb5 4605 */
0d5ff566
TH
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			/* pad the last byte into a 16-bit register write */
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			/* read one word, keep only its first byte */
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
4632
75e99585 4633/**
0d5ff566 4634 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
4635 * @adev: device to target
4636 * @buf: data buffer
4637 * @buflen: buffer length
4638 * @write_data: read/write
4639 *
88574551 4640 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4641 * transfer with interrupts disabled.
4642 *
4643 * LOCKING:
4644 * Inherited from caller.
4645 */
0d5ff566
TH
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	/* same as ata_data_xfer(), but with local interrupts masked
	 * for the duration of the PIO transfer */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
4654
4655
6ae4cfb5 4656/**
5a5dbd18 4657 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4658 * @qc: Command on going
4659 *
5a5dbd18 4660 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4661 *
4662 * LOCKING:
4663 * Inherited from caller.
4664 */
4665
1da177e4
LT
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* advance the HSM when this is the final sector of the command */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	/* account for the transferred sector */
	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* move to the next sg entry once this one is exhausted */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 4712
07f6f7d0 4713/**
5a5dbd18 4714 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4715 * @qc: Command on going
4716 *
5a5dbd18 4717 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4718 * ATA device for the DRQ request.
4719 *
4720 * LOCKING:
4721 * Inherited from caller.
4722 */
1da177e4 4723
07f6f7d0
AL
4724static void ata_pio_sectors(struct ata_queued_cmd *qc)
4725{
4726 if (is_multi_taskfile(&qc->tf)) {
4727 /* READ/WRITE MULTIPLE */
4728 unsigned int nsect;
4729
587005de 4730 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4731
5a5dbd18 4732 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4733 qc->dev->multi_count);
07f6f7d0
AL
4734 while (nsect--)
4735 ata_pio_sector(qc);
4736 } else
4737 ata_pio_sector(qc);
4cc980b3
AL
4738
4739 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
4740}
4741
c71c1857
AL
4742/**
4743 * atapi_send_cdb - Write CDB bytes to hardware
4744 * @ap: Port to which ATAPI device is attached.
4745 * @qc: Taskfile currently active
4746 *
4747 * When device has indicated its readiness to accept
4748 * a CDB, this function is called. Send the CDB.
4749 *
4750 * LOCKING:
4751 * caller.
4752 */
4753
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	/* the CDB is always written (last arg 1) via PIO */
	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* pick the next HSM state based on how the data phase proceeds */
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase; wait for command completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
4777
6ae4cfb5
AL
4778/**
4779 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4780 * @qc: Command on going
4781 * @bytes: number of bytes
4782 *
4783 * Transfer Transfer data from/to the ATAPI device.
4784 *
4785 * LOCKING:
4786 * Inherited from caller.
4787 *
4788 */
4789
1da177e4
LT
4790static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4791{
4792 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4793 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4794 struct ata_port *ap = qc->ap;
4795 struct page *page;
4796 unsigned char *buf;
4797 unsigned int offset, count;
4798
563a6e1f 4799 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4800 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4801
4802next_sg:
563a6e1f 4803 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4804 /*
563a6e1f
AL
4805 * The end of qc->sg is reached and the device expects
4806 * more data to transfer. In order not to overrun qc->sg
4807 * and fulfill length specified in the byte count register,
4808 * - for read case, discard trailing data from the device
4809 * - for write case, padding zero data to the device
4810 */
4811 u16 pad_buf[1] = { 0 };
4812 unsigned int words = bytes >> 1;
4813 unsigned int i;
4814
4815 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4816 ata_dev_printk(qc->dev, KERN_WARNING,
4817 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4818
4819 for (i = 0; i < words; i++)
a6b2c5d4 4820 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4821
14be71f4 4822 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4823 return;
4824 }
4825
cedc9a47 4826 sg = &qc->__sg[qc->cursg];
1da177e4 4827
1da177e4
LT
4828 page = sg->page;
4829 offset = sg->offset + qc->cursg_ofs;
4830
4831 /* get the current page and offset */
4832 page = nth_page(page, (offset >> PAGE_SHIFT));
4833 offset %= PAGE_SIZE;
4834
6952df03 4835 /* don't overrun current sg */
32529e01 4836 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4837
4838 /* don't cross page boundaries */
4839 count = min(count, (unsigned int)PAGE_SIZE - offset);
4840
7282aa4b
AL
4841 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4842
91b8b313
AL
4843 if (PageHighMem(page)) {
4844 unsigned long flags;
4845
a6b2c5d4 4846 /* FIXME: use bounce buffer */
91b8b313
AL
4847 local_irq_save(flags);
4848 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4849
91b8b313 4850 /* do the actual data transfer */
a6b2c5d4 4851 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4852
91b8b313
AL
4853 kunmap_atomic(buf, KM_IRQ0);
4854 local_irq_restore(flags);
4855 } else {
4856 buf = page_address(page);
a6b2c5d4 4857 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4858 }
1da177e4
LT
4859
4860 bytes -= count;
4861 qc->curbytes += count;
4862 qc->cursg_ofs += count;
4863
32529e01 4864 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4865 qc->cursg++;
4866 qc->cursg_ofs = 0;
4867 }
4868
563a6e1f 4869 if (bytes)
1da177e4 4870 goto next_sg;
1da177e4
LT
4871}
4872
6ae4cfb5
AL
4873/**
4874 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4875 * @qc: Command on going
4876 *
4877 * Transfer Transfer data from/to the ATAPI device.
4878 *
4879 * LOCKING:
4880 * Inherited from caller.
6ae4cfb5
AL
4881 */
4882
1da177e4
LT
4883static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4884{
4885 struct ata_port *ap = qc->ap;
4886 struct ata_device *dev = qc->dev;
4887 unsigned int ireason, bc_lo, bc_hi, bytes;
4888 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4889
eec4c3f3
AL
4890 /* Abuse qc->result_tf for temp storage of intermediate TF
4891 * here to save some kernel stack usage.
4892 * For normal completion, qc->result_tf is not relevant. For
4893 * error, qc->result_tf is later overwritten by ata_qc_complete().
4894 * So, the correctness of qc->result_tf is not affected.
4895 */
4896 ap->ops->tf_read(ap, &qc->result_tf);
4897 ireason = qc->result_tf.nsect;
4898 bc_lo = qc->result_tf.lbam;
4899 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4900 bytes = (bc_hi << 8) | bc_lo;
4901
4902 /* shall be cleared to zero, indicating xfer of data */
4903 if (ireason & (1 << 0))
4904 goto err_out;
4905
4906 /* make sure transfer direction matches expected */
4907 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4908 if (do_write != i_write)
4909 goto err_out;
4910
44877b4e 4911 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4912
1da177e4 4913 __atapi_pio_bytes(qc, bytes);
4cc980b3 4914 ata_altstatus(ap); /* flush */
1da177e4
LT
4915
4916 return;
4917
4918err_out:
f15a1daf 4919 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4920 qc->err_mask |= AC_ERR_HSM;
14be71f4 4921 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4922}
4923
4924/**
c234fb00
AL
4925 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4926 * @ap: the target ata_port
4927 * @qc: qc on going
1da177e4 4928 *
c234fb00
AL
4929 * RETURNS:
4930 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4931 */
c234fb00
AL
4932
4933static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4934{
c234fb00
AL
4935 if (qc->tf.flags & ATA_TFLAG_POLLING)
4936 return 1;
1da177e4 4937
c234fb00
AL
4938 if (ap->hsm_task_state == HSM_ST_FIRST) {
4939 if (qc->tf.protocol == ATA_PROT_PIO &&
4940 (qc->tf.flags & ATA_TFLAG_WRITE))
4941 return 1;
1da177e4 4942
c234fb00
AL
4943 if (is_atapi_taskfile(&qc->tf) &&
4944 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4945 return 1;
fe79e683
AL
4946 }
4947
c234fb00
AL
4948 return 0;
4949}
1da177e4 4950
c17ea20d
TH
4951/**
4952 * ata_hsm_qc_complete - finish a qc running on standard HSM
4953 * @qc: Command to complete
4954 * @in_wq: 1 if called from workqueue, 0 otherwise
4955 *
4956 * Finish @qc which is running on standard HSM.
4957 *
4958 * LOCKING:
cca3974e 4959 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4960 * Otherwise, none on entry and grabs host lock.
4961 */
4962static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4963{
4964 struct ata_port *ap = qc->ap;
4965 unsigned long flags;
4966
4967 if (ap->ops->error_handler) {
4968 if (in_wq) {
ba6a1308 4969 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4970
cca3974e
JG
4971 /* EH might have kicked in while host lock is
4972 * released.
c17ea20d
TH
4973 */
4974 qc = ata_qc_from_tag(ap, qc->tag);
4975 if (qc) {
4976 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4977 ap->ops->irq_on(ap);
c17ea20d
TH
4978 ata_qc_complete(qc);
4979 } else
4980 ata_port_freeze(ap);
4981 }
4982
ba6a1308 4983 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4984 } else {
4985 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4986 ata_qc_complete(qc);
4987 else
4988 ata_port_freeze(ap);
4989 }
4990 } else {
4991 if (in_wq) {
ba6a1308 4992 spin_lock_irqsave(ap->lock, flags);
83625006 4993 ap->ops->irq_on(ap);
c17ea20d 4994 ata_qc_complete(qc);
ba6a1308 4995 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4996 } else
4997 ata_qc_complete(qc);
4998 }
4999}
5000
bb5cb290
AL
5001/**
5002 * ata_hsm_move - move the HSM to the next state.
5003 * @ap: the target ata_port
5004 * @qc: qc on going
5005 * @status: current device status
5006 * @in_wq: 1 if called from workqueue, 0 otherwise
5007 *
5008 * RETURNS:
5009 * 1 when poll next status needed, 0 otherwise.
5010 */
9a1004d0
TH
5011int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5012 u8 status, int in_wq)
e2cec771 5013{
bb5cb290
AL
5014 unsigned long flags = 0;
5015 int poll_next;
5016
6912ccd5
AL
5017 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5018
bb5cb290
AL
5019 /* Make sure ata_qc_issue_prot() does not throw things
5020 * like DMA polling into the workqueue. Notice that
5021 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5022 */
c234fb00 5023 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 5024
e2cec771 5025fsm_start:
999bb6f4 5026 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 5027 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 5028
e2cec771
AL
5029 switch (ap->hsm_task_state) {
5030 case HSM_ST_FIRST:
bb5cb290
AL
5031 /* Send first data block or PACKET CDB */
5032
5033 /* If polling, we will stay in the work queue after
5034 * sending the data. Otherwise, interrupt handler
5035 * takes over after sending the data.
5036 */
5037 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5038
e2cec771 5039 /* check device status */
3655d1d3
AL
5040 if (unlikely((status & ATA_DRQ) == 0)) {
5041 /* handle BSY=0, DRQ=0 as error */
5042 if (likely(status & (ATA_ERR | ATA_DF)))
5043 /* device stops HSM for abort/error */
5044 qc->err_mask |= AC_ERR_DEV;
5045 else
5046 /* HSM violation. Let EH handle this */
5047 qc->err_mask |= AC_ERR_HSM;
5048
14be71f4 5049 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 5050 goto fsm_start;
1da177e4
LT
5051 }
5052
71601958
AL
5053 /* Device should not ask for data transfer (DRQ=1)
5054 * when it finds something wrong.
eee6c32f
AL
5055 * We ignore DRQ here and stop the HSM by
5056 * changing hsm_task_state to HSM_ST_ERR and
5057 * let the EH abort the command or reset the device.
71601958
AL
5058 */
5059 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
5060 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
5061 "error, dev_stat 0x%X\n", status);
3655d1d3 5062 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
5063 ap->hsm_task_state = HSM_ST_ERR;
5064 goto fsm_start;
71601958 5065 }
1da177e4 5066
bb5cb290
AL
5067 /* Send the CDB (atapi) or the first data block (ata pio out).
5068 * During the state transition, interrupt handler shouldn't
5069 * be invoked before the data transfer is complete and
5070 * hsm_task_state is changed. Hence, the following locking.
5071 */
5072 if (in_wq)
ba6a1308 5073 spin_lock_irqsave(ap->lock, flags);
1da177e4 5074
bb5cb290
AL
5075 if (qc->tf.protocol == ATA_PROT_PIO) {
5076 /* PIO data out protocol.
5077 * send first data block.
5078 */
0565c26d 5079
bb5cb290
AL
5080 /* ata_pio_sectors() might change the state
5081 * to HSM_ST_LAST. so, the state is changed here
5082 * before ata_pio_sectors().
5083 */
5084 ap->hsm_task_state = HSM_ST;
5085 ata_pio_sectors(qc);
bb5cb290
AL
5086 } else
5087 /* send CDB */
5088 atapi_send_cdb(ap, qc);
5089
5090 if (in_wq)
ba6a1308 5091 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
5092
5093 /* if polling, ata_pio_task() handles the rest.
5094 * otherwise, interrupt handler takes over from here.
5095 */
e2cec771 5096 break;
1c848984 5097
e2cec771
AL
5098 case HSM_ST:
5099 /* complete command or read/write the data register */
5100 if (qc->tf.protocol == ATA_PROT_ATAPI) {
5101 /* ATAPI PIO protocol */
5102 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
5103 /* No more data to transfer or device error.
5104 * Device error will be tagged in HSM_ST_LAST.
5105 */
e2cec771
AL
5106 ap->hsm_task_state = HSM_ST_LAST;
5107 goto fsm_start;
5108 }
1da177e4 5109
71601958
AL
5110 /* Device should not ask for data transfer (DRQ=1)
5111 * when it finds something wrong.
eee6c32f
AL
5112 * We ignore DRQ here and stop the HSM by
5113 * changing hsm_task_state to HSM_ST_ERR and
5114 * let the EH abort the command or reset the device.
71601958
AL
5115 */
5116 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
5117 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5118 "device error, dev_stat 0x%X\n",
5119 status);
3655d1d3 5120 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
5121 ap->hsm_task_state = HSM_ST_ERR;
5122 goto fsm_start;
71601958 5123 }
1da177e4 5124
e2cec771 5125 atapi_pio_bytes(qc);
7fb6ec28 5126
e2cec771
AL
5127 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5128 /* bad ireason reported by device */
5129 goto fsm_start;
1da177e4 5130
e2cec771
AL
5131 } else {
5132 /* ATA PIO protocol */
5133 if (unlikely((status & ATA_DRQ) == 0)) {
5134 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
5135 if (likely(status & (ATA_ERR | ATA_DF)))
5136 /* device stops HSM for abort/error */
5137 qc->err_mask |= AC_ERR_DEV;
5138 else
55a8e2c8
TH
5139 /* HSM violation. Let EH handle this.
5140 * Phantom devices also trigger this
5141 * condition. Mark hint.
5142 */
5143 qc->err_mask |= AC_ERR_HSM |
5144 AC_ERR_NODEV_HINT;
3655d1d3 5145
e2cec771
AL
5146 ap->hsm_task_state = HSM_ST_ERR;
5147 goto fsm_start;
5148 }
1da177e4 5149
eee6c32f
AL
5150 /* For PIO reads, some devices may ask for
5151 * data transfer (DRQ=1) alone with ERR=1.
5152 * We respect DRQ here and transfer one
5153 * block of junk data before changing the
5154 * hsm_task_state to HSM_ST_ERR.
5155 *
5156 * For PIO writes, ERR=1 DRQ=1 doesn't make
5157 * sense since the data block has been
5158 * transferred to the device.
71601958
AL
5159 */
5160 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
5161 /* data might be corrputed */
5162 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
5163
5164 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5165 ata_pio_sectors(qc);
eee6c32f
AL
5166 status = ata_wait_idle(ap);
5167 }
5168
3655d1d3
AL
5169 if (status & (ATA_BUSY | ATA_DRQ))
5170 qc->err_mask |= AC_ERR_HSM;
5171
eee6c32f
AL
5172 /* ata_pio_sectors() might change the
5173 * state to HSM_ST_LAST. so, the state
5174 * is changed after ata_pio_sectors().
5175 */
5176 ap->hsm_task_state = HSM_ST_ERR;
5177 goto fsm_start;
71601958
AL
5178 }
5179
e2cec771
AL
5180 ata_pio_sectors(qc);
5181
5182 if (ap->hsm_task_state == HSM_ST_LAST &&
5183 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5184 /* all data read */
52a32205 5185 status = ata_wait_idle(ap);
e2cec771
AL
5186 goto fsm_start;
5187 }
5188 }
5189
bb5cb290 5190 poll_next = 1;
1da177e4
LT
5191 break;
5192
14be71f4 5193 case HSM_ST_LAST:
6912ccd5
AL
5194 if (unlikely(!ata_ok(status))) {
5195 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
5196 ap->hsm_task_state = HSM_ST_ERR;
5197 goto fsm_start;
5198 }
5199
5200 /* no more data to transfer */
4332a771 5201 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 5202 ap->print_id, qc->dev->devno, status);
e2cec771 5203
6912ccd5
AL
5204 WARN_ON(qc->err_mask);
5205
e2cec771 5206 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 5207
e2cec771 5208 /* complete taskfile transaction */
c17ea20d 5209 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5210
5211 poll_next = 0;
1da177e4
LT
5212 break;
5213
14be71f4 5214 case HSM_ST_ERR:
e2cec771
AL
5215 /* make sure qc->err_mask is available to
5216 * know what's wrong and recover
5217 */
5218 WARN_ON(qc->err_mask == 0);
5219
5220 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 5221
999bb6f4 5222 /* complete taskfile transaction */
c17ea20d 5223 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5224
5225 poll_next = 0;
e2cec771
AL
5226 break;
5227 default:
bb5cb290 5228 poll_next = 0;
6912ccd5 5229 BUG();
1da177e4
LT
5230 }
5231
bb5cb290 5232 return poll_next;
1da177e4
LT
5233}
5234
65f27f38 5235static void ata_pio_task(struct work_struct *work)
8061f5f0 5236{
65f27f38
DH
5237 struct ata_port *ap =
5238 container_of(work, struct ata_port, port_task.work);
5239 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5240 u8 status;
a1af3734 5241 int poll_next;
8061f5f0 5242
7fb6ec28 5243fsm_start:
a1af3734 5244 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5245
a1af3734
AL
5246 /*
5247 * This is purely heuristic. This is a fast path.
5248 * Sometimes when we enter, BSY will be cleared in
5249 * a chk-status or two. If not, the drive is probably seeking
5250 * or something. Snooze for a couple msecs, then
5251 * chk-status again. If still busy, queue delayed work.
5252 */
5253 status = ata_busy_wait(ap, ATA_BUSY, 5);
5254 if (status & ATA_BUSY) {
5255 msleep(2);
5256 status = ata_busy_wait(ap, ATA_BUSY, 10);
5257 if (status & ATA_BUSY) {
31ce6dae 5258 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5259 return;
5260 }
8061f5f0
TH
5261 }
5262
a1af3734
AL
5263 /* move the HSM */
5264 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5265
a1af3734
AL
5266 /* another command or interrupt handler
5267 * may be running at this point.
5268 */
5269 if (poll_next)
7fb6ec28 5270 goto fsm_start;
8061f5f0
TH
5271}
5272
1da177e4
LT
5273/**
5274 * ata_qc_new - Request an available ATA command, for queueing
5275 * @ap: Port associated with device @dev
5276 * @dev: Device from whom we request an available command structure
5277 *
5278 * LOCKING:
0cba632b 5279 * None.
1da177e4
LT
5280 */
5281
5282static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5283{
5284 struct ata_queued_cmd *qc = NULL;
5285 unsigned int i;
5286
e3180499 5287 /* no command while frozen */
b51e9e5d 5288 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5289 return NULL;
5290
2ab7db1f
TH
5291 /* the last tag is reserved for internal command. */
5292 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5293 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5294 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5295 break;
5296 }
5297
5298 if (qc)
5299 qc->tag = i;
5300
5301 return qc;
5302}
5303
5304/**
5305 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5306 * @dev: Device from whom we request an available command structure
5307 *
5308 * LOCKING:
0cba632b 5309 * None.
1da177e4
LT
5310 */
5311
3373efd8 5312struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5313{
9af5c9c9 5314 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5315 struct ata_queued_cmd *qc;
5316
5317 qc = ata_qc_new(ap);
5318 if (qc) {
1da177e4
LT
5319 qc->scsicmd = NULL;
5320 qc->ap = ap;
5321 qc->dev = dev;
1da177e4 5322
2c13b7ce 5323 ata_qc_reinit(qc);
1da177e4
LT
5324 }
5325
5326 return qc;
5327}
5328
1da177e4
LT
5329/**
5330 * ata_qc_free - free unused ata_queued_cmd
5331 * @qc: Command to complete
5332 *
5333 * Designed to free unused ata_queued_cmd object
5334 * in case something prevents using it.
5335 *
5336 * LOCKING:
cca3974e 5337 * spin_lock_irqsave(host lock)
1da177e4
LT
5338 */
5339void ata_qc_free(struct ata_queued_cmd *qc)
5340{
4ba946e9
TH
5341 struct ata_port *ap = qc->ap;
5342 unsigned int tag;
5343
a4631474 5344 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5345
4ba946e9
TH
5346 qc->flags = 0;
5347 tag = qc->tag;
5348 if (likely(ata_tag_valid(tag))) {
4ba946e9 5349 qc->tag = ATA_TAG_POISON;
6cec4a39 5350 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5351 }
1da177e4
LT
5352}
5353
76014427 5354void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5355{
dedaf2b0 5356 struct ata_port *ap = qc->ap;
9af5c9c9 5357 struct ata_link *link = qc->dev->link;
dedaf2b0 5358
a4631474
TH
5359 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5360 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5361
5362 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5363 ata_sg_clean(qc);
5364
7401abf2 5365 /* command should be marked inactive atomically with qc completion */
dedaf2b0 5366 if (qc->tf.protocol == ATA_PROT_NCQ)
9af5c9c9 5367 link->sactive &= ~(1 << qc->tag);
dedaf2b0 5368 else
9af5c9c9 5369 link->active_tag = ATA_TAG_POISON;
7401abf2 5370
3f3791d3
AL
5371 /* atapi: mark qc as inactive to prevent the interrupt handler
5372 * from completing the command twice later, before the error handler
5373 * is called. (when rc != 0 and atapi request sense is needed)
5374 */
5375 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5376 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5377
1da177e4 5378 /* call completion callback */
77853bf2 5379 qc->complete_fn(qc);
1da177e4
LT
5380}
5381
39599a53
TH
5382static void fill_result_tf(struct ata_queued_cmd *qc)
5383{
5384 struct ata_port *ap = qc->ap;
5385
39599a53 5386 qc->result_tf.flags = qc->tf.flags;
4742d54f 5387 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5388}
5389
f686bcb8
TH
5390/**
5391 * ata_qc_complete - Complete an active ATA command
5392 * @qc: Command to complete
5393 * @err_mask: ATA Status register contents
5394 *
5395 * Indicate to the mid and upper layers that an ATA
5396 * command has completed, with either an ok or not-ok status.
5397 *
5398 * LOCKING:
cca3974e 5399 * spin_lock_irqsave(host lock)
f686bcb8
TH
5400 */
5401void ata_qc_complete(struct ata_queued_cmd *qc)
5402{
5403 struct ata_port *ap = qc->ap;
5404
5405 /* XXX: New EH and old EH use different mechanisms to
5406 * synchronize EH with regular execution path.
5407 *
5408 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5409 * Normal execution path is responsible for not accessing a
5410 * failed qc. libata core enforces the rule by returning NULL
5411 * from ata_qc_from_tag() for failed qcs.
5412 *
5413 * Old EH depends on ata_qc_complete() nullifying completion
5414 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5415 * not synchronize with interrupt handler. Only PIO task is
5416 * taken care of.
5417 */
5418 if (ap->ops->error_handler) {
b51e9e5d 5419 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5420
5421 if (unlikely(qc->err_mask))
5422 qc->flags |= ATA_QCFLAG_FAILED;
5423
5424 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5425 if (!ata_tag_internal(qc->tag)) {
5426 /* always fill result TF for failed qc */
39599a53 5427 fill_result_tf(qc);
f686bcb8
TH
5428 ata_qc_schedule_eh(qc);
5429 return;
5430 }
5431 }
5432
5433 /* read result TF if requested */
5434 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5435 fill_result_tf(qc);
f686bcb8
TH
5436
5437 __ata_qc_complete(qc);
5438 } else {
5439 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5440 return;
5441
5442 /* read result TF if failed or requested */
5443 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5444 fill_result_tf(qc);
f686bcb8
TH
5445
5446 __ata_qc_complete(qc);
5447 }
5448}
5449
dedaf2b0
TH
5450/**
5451 * ata_qc_complete_multiple - Complete multiple qcs successfully
5452 * @ap: port in question
5453 * @qc_active: new qc_active mask
5454 * @finish_qc: LLDD callback invoked before completing a qc
5455 *
5456 * Complete in-flight commands. This functions is meant to be
5457 * called from low-level driver's interrupt routine to complete
5458 * requests normally. ap->qc_active and @qc_active is compared
5459 * and commands are completed accordingly.
5460 *
5461 * LOCKING:
cca3974e 5462 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5463 *
5464 * RETURNS:
5465 * Number of completed commands on success, -errno otherwise.
5466 */
5467int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5468 void (*finish_qc)(struct ata_queued_cmd *))
5469{
5470 int nr_done = 0;
5471 u32 done_mask;
5472 int i;
5473
5474 done_mask = ap->qc_active ^ qc_active;
5475
5476 if (unlikely(done_mask & qc_active)) {
5477 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5478 "(%08x->%08x)\n", ap->qc_active, qc_active);
5479 return -EINVAL;
5480 }
5481
5482 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5483 struct ata_queued_cmd *qc;
5484
5485 if (!(done_mask & (1 << i)))
5486 continue;
5487
5488 if ((qc = ata_qc_from_tag(ap, i))) {
5489 if (finish_qc)
5490 finish_qc(qc);
5491 ata_qc_complete(qc);
5492 nr_done++;
5493 }
5494 }
5495
5496 return nr_done;
5497}
5498
1da177e4
LT
5499static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5500{
5501 struct ata_port *ap = qc->ap;
5502
5503 switch (qc->tf.protocol) {
3dc1d881 5504 case ATA_PROT_NCQ:
1da177e4
LT
5505 case ATA_PROT_DMA:
5506 case ATA_PROT_ATAPI_DMA:
5507 return 1;
5508
5509 case ATA_PROT_ATAPI:
5510 case ATA_PROT_PIO:
1da177e4
LT
5511 if (ap->flags & ATA_FLAG_PIO_DMA)
5512 return 1;
5513
5514 /* fall through */
5515
5516 default:
5517 return 0;
5518 }
5519
5520 /* never reached */
5521}
5522
5523/**
5524 * ata_qc_issue - issue taskfile to device
5525 * @qc: command to issue to device
5526 *
5527 * Prepare an ATA command to submission to device.
5528 * This includes mapping the data into a DMA-able
5529 * area, filling in the S/G table, and finally
5530 * writing the taskfile to hardware, starting the command.
5531 *
5532 * LOCKING:
cca3974e 5533 * spin_lock_irqsave(host lock)
1da177e4 5534 */
8e0e694a 5535void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5536{
5537 struct ata_port *ap = qc->ap;
9af5c9c9 5538 struct ata_link *link = qc->dev->link;
1da177e4 5539
dedaf2b0
TH
5540 /* Make sure only one non-NCQ command is outstanding. The
5541 * check is skipped for old EH because it reuses active qc to
5542 * request ATAPI sense.
5543 */
9af5c9c9 5544 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0
TH
5545
5546 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9
TH
5547 WARN_ON(link->sactive & (1 << qc->tag));
5548 link->sactive |= 1 << qc->tag;
dedaf2b0 5549 } else {
9af5c9c9
TH
5550 WARN_ON(link->sactive);
5551 link->active_tag = qc->tag;
dedaf2b0
TH
5552 }
5553
e4a70e76 5554 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5555 ap->qc_active |= 1 << qc->tag;
e4a70e76 5556
1da177e4
LT
5557 if (ata_should_dma_map(qc)) {
5558 if (qc->flags & ATA_QCFLAG_SG) {
5559 if (ata_sg_setup(qc))
8e436af9 5560 goto sg_err;
1da177e4
LT
5561 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5562 if (ata_sg_setup_one(qc))
8e436af9 5563 goto sg_err;
1da177e4
LT
5564 }
5565 } else {
5566 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5567 }
5568
5569 ap->ops->qc_prep(qc);
5570
8e0e694a
TH
5571 qc->err_mask |= ap->ops->qc_issue(qc);
5572 if (unlikely(qc->err_mask))
5573 goto err;
5574 return;
1da177e4 5575
8e436af9
TH
5576sg_err:
5577 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
5578 qc->err_mask |= AC_ERR_SYSTEM;
5579err:
5580 ata_qc_complete(qc);
1da177e4
LT
5581}
5582
5583/**
5584 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5585 * @qc: command to issue to device
5586 *
5587 * Using various libata functions and hooks, this function
5588 * starts an ATA command. ATA commands are grouped into
5589 * classes called "protocols", and issuing each type of protocol
5590 * is slightly different.
5591 *
0baab86b
EF
5592 * May be used as the qc_issue() entry in ata_port_operations.
5593 *
1da177e4 5594 * LOCKING:
cca3974e 5595 * spin_lock_irqsave(host lock)
1da177e4
LT
5596 *
5597 * RETURNS:
9a3d9eb0 5598 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5599 */
5600
9a3d9eb0 5601unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5602{
5603 struct ata_port *ap = qc->ap;
5604
e50362ec
AL
5605 /* Use polling pio if the LLD doesn't handle
5606 * interrupt driven pio and atapi CDB interrupt.
5607 */
5608 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5609 switch (qc->tf.protocol) {
5610 case ATA_PROT_PIO:
e3472cbe 5611 case ATA_PROT_NODATA:
e50362ec
AL
5612 case ATA_PROT_ATAPI:
5613 case ATA_PROT_ATAPI_NODATA:
5614 qc->tf.flags |= ATA_TFLAG_POLLING;
5615 break;
5616 case ATA_PROT_ATAPI_DMA:
5617 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5618 /* see ata_dma_blacklisted() */
e50362ec
AL
5619 BUG();
5620 break;
5621 default:
5622 break;
5623 }
5624 }
5625
312f7da2 5626 /* select the device */
1da177e4
LT
5627 ata_dev_select(ap, qc->dev->devno, 1, 0);
5628
312f7da2 5629 /* start the command */
1da177e4
LT
5630 switch (qc->tf.protocol) {
5631 case ATA_PROT_NODATA:
312f7da2
AL
5632 if (qc->tf.flags & ATA_TFLAG_POLLING)
5633 ata_qc_set_polling(qc);
5634
e5338254 5635 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5636 ap->hsm_task_state = HSM_ST_LAST;
5637
5638 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5639 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5640
1da177e4
LT
5641 break;
5642
5643 case ATA_PROT_DMA:
587005de 5644 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5645
1da177e4
LT
5646 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5647 ap->ops->bmdma_setup(qc); /* set up bmdma */
5648 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5649 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5650 break;
5651
312f7da2
AL
5652 case ATA_PROT_PIO:
5653 if (qc->tf.flags & ATA_TFLAG_POLLING)
5654 ata_qc_set_polling(qc);
1da177e4 5655
e5338254 5656 ata_tf_to_host(ap, &qc->tf);
312f7da2 5657
54f00389
AL
5658 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5659 /* PIO data out protocol */
5660 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5661 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5662
5663 /* always send first data block using
e27486db 5664 * the ata_pio_task() codepath.
54f00389 5665 */
312f7da2 5666 } else {
54f00389
AL
5667 /* PIO data in protocol */
5668 ap->hsm_task_state = HSM_ST;
5669
5670 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5671 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5672
5673 /* if polling, ata_pio_task() handles the rest.
5674 * otherwise, interrupt handler takes over from here.
5675 */
312f7da2
AL
5676 }
5677
1da177e4
LT
5678 break;
5679
1da177e4 5680 case ATA_PROT_ATAPI:
1da177e4 5681 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5682 if (qc->tf.flags & ATA_TFLAG_POLLING)
5683 ata_qc_set_polling(qc);
5684
e5338254 5685 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5686
312f7da2
AL
5687 ap->hsm_task_state = HSM_ST_FIRST;
5688
5689 /* send cdb by polling if no cdb interrupt */
5690 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5691 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5692 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5693 break;
5694
5695 case ATA_PROT_ATAPI_DMA:
587005de 5696 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5697
1da177e4
LT
5698 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5699 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5700 ap->hsm_task_state = HSM_ST_FIRST;
5701
5702 /* send cdb by polling if no cdb interrupt */
5703 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5704 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5705 break;
5706
5707 default:
5708 WARN_ON(1);
9a3d9eb0 5709 return AC_ERR_SYSTEM;
1da177e4
LT
5710 }
5711
5712 return 0;
5713}
5714
1da177e4
LT
5715/**
5716 * ata_host_intr - Handle host interrupt for given (port, task)
5717 * @ap: Port on which interrupt arrived (possibly...)
5718 * @qc: Taskfile currently active in engine
5719 *
5720 * Handle host interrupt for given queued command. Currently,
5721 * only DMA interrupts are handled. All other commands are
5722 * handled via polling with interrupts disabled (nIEN bit).
5723 *
5724 * LOCKING:
cca3974e 5725 * spin_lock_irqsave(host lock)
1da177e4
LT
5726 *
5727 * RETURNS:
5728 * One if interrupt was handled, zero if not (shared irq).
5729 */
5730
5731inline unsigned int ata_host_intr (struct ata_port *ap,
5732 struct ata_queued_cmd *qc)
5733{
9af5c9c9 5734 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 5735 u8 status, host_stat = 0;
1da177e4 5736
312f7da2 5737 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5738 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5739
312f7da2
AL
5740 /* Check whether we are expecting interrupt in this state */
5741 switch (ap->hsm_task_state) {
5742 case HSM_ST_FIRST:
6912ccd5
AL
5743 /* Some pre-ATAPI-4 devices assert INTRQ
5744 * at this state when ready to receive CDB.
5745 */
1da177e4 5746
312f7da2
AL
5747 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5748 * The flag was turned on only for atapi devices.
5749 * No need to check is_atapi_taskfile(&qc->tf) again.
5750 */
5751 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5752 goto idle_irq;
1da177e4 5753 break;
312f7da2
AL
5754 case HSM_ST_LAST:
5755 if (qc->tf.protocol == ATA_PROT_DMA ||
5756 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5757 /* check status of DMA engine */
5758 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5759 VPRINTK("ata%u: host_stat 0x%X\n",
5760 ap->print_id, host_stat);
312f7da2
AL
5761
5762 /* if it's not our irq... */
5763 if (!(host_stat & ATA_DMA_INTR))
5764 goto idle_irq;
5765
5766 /* before we do anything else, clear DMA-Start bit */
5767 ap->ops->bmdma_stop(qc);
a4f16610
AL
5768
5769 if (unlikely(host_stat & ATA_DMA_ERR)) {
5770 /* error when transfering data to/from memory */
5771 qc->err_mask |= AC_ERR_HOST_BUS;
5772 ap->hsm_task_state = HSM_ST_ERR;
5773 }
312f7da2
AL
5774 }
5775 break;
5776 case HSM_ST:
5777 break;
1da177e4
LT
5778 default:
5779 goto idle_irq;
5780 }
5781
312f7da2
AL
5782 /* check altstatus */
5783 status = ata_altstatus(ap);
5784 if (status & ATA_BUSY)
5785 goto idle_irq;
1da177e4 5786
312f7da2
AL
5787 /* check main status, clearing INTRQ */
5788 status = ata_chk_status(ap);
5789 if (unlikely(status & ATA_BUSY))
5790 goto idle_irq;
1da177e4 5791
312f7da2
AL
5792 /* ack bmdma irq events */
5793 ap->ops->irq_clear(ap);
1da177e4 5794
bb5cb290 5795 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5796
5797 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5798 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5799 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5800
1da177e4
LT
5801 return 1; /* irq handled */
5802
5803idle_irq:
5804 ap->stats.idle_irq++;
5805
5806#ifdef ATA_IRQ_TRAP
5807 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
5808 ata_chk_status(ap);
5809 ap->ops->irq_clear(ap);
f15a1daf 5810 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5811 return 1;
1da177e4
LT
5812 }
5813#endif
5814 return 0; /* irq not handled */
5815}
5816
5817/**
5818 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5819 * @irq: irq line (unused)
cca3974e 5820 * @dev_instance: pointer to our ata_host information structure
1da177e4 5821 *
0cba632b
JG
5822 * Default interrupt handler for PCI IDE devices. Calls
5823 * ata_host_intr() for each port that is not disabled.
5824 *
1da177e4 5825 * LOCKING:
cca3974e 5826 * Obtains host lock during operation.
1da177e4
LT
5827 *
5828 * RETURNS:
0cba632b 5829 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5830 */
5831
7d12e780 5832irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5833{
cca3974e 5834 struct ata_host *host = dev_instance;
1da177e4
LT
5835 unsigned int i;
5836 unsigned int handled = 0;
5837 unsigned long flags;
5838
5839 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5840 spin_lock_irqsave(&host->lock, flags);
1da177e4 5841
cca3974e 5842 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5843 struct ata_port *ap;
5844
cca3974e 5845 ap = host->ports[i];
c1389503 5846 if (ap &&
029f5468 5847 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5848 struct ata_queued_cmd *qc;
5849
9af5c9c9 5850 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 5851 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5852 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5853 handled |= ata_host_intr(ap, qc);
5854 }
5855 }
5856
cca3974e 5857 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5858
5859 return IRQ_RETVAL(handled);
5860}
5861
34bf2170
TH
5862/**
5863 * sata_scr_valid - test whether SCRs are accessible
936fd732 5864 * @link: ATA link to test SCR accessibility for
34bf2170 5865 *
936fd732 5866 * Test whether SCRs are accessible for @link.
34bf2170
TH
5867 *
5868 * LOCKING:
5869 * None.
5870 *
5871 * RETURNS:
5872 * 1 if SCRs are accessible, 0 otherwise.
5873 */
936fd732 5874int sata_scr_valid(struct ata_link *link)
34bf2170 5875{
936fd732
TH
5876 struct ata_port *ap = link->ap;
5877
a16abc0b 5878 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5879}
5880
5881/**
5882 * sata_scr_read - read SCR register of the specified port
936fd732 5883 * @link: ATA link to read SCR for
34bf2170
TH
5884 * @reg: SCR to read
5885 * @val: Place to store read value
5886 *
936fd732 5887 * Read SCR register @reg of @link into *@val. This function is
34bf2170
TH
5888 * guaranteed to succeed if the cable type of the port is SATA
5889 * and the port implements ->scr_read.
5890 *
5891 * LOCKING:
5892 * None.
5893 *
5894 * RETURNS:
5895 * 0 on success, negative errno on failure.
5896 */
936fd732 5897int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5898{
936fd732
TH
5899 struct ata_port *ap = link->ap;
5900
5901 if (sata_scr_valid(link))
da3dbb17 5902 return ap->ops->scr_read(ap, reg, val);
34bf2170
TH
5903 return -EOPNOTSUPP;
5904}
5905
5906/**
5907 * sata_scr_write - write SCR register of the specified port
936fd732 5908 * @link: ATA link to write SCR for
34bf2170
TH
5909 * @reg: SCR to write
5910 * @val: value to write
5911 *
936fd732 5912 * Write @val to SCR register @reg of @link. This function is
34bf2170
TH
5913 * guaranteed to succeed if the cable type of the port is SATA
5914 * and the port implements ->scr_read.
5915 *
5916 * LOCKING:
5917 * None.
5918 *
5919 * RETURNS:
5920 * 0 on success, negative errno on failure.
5921 */
936fd732 5922int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 5923{
936fd732
TH
5924 struct ata_port *ap = link->ap;
5925
5926 if (sata_scr_valid(link))
da3dbb17 5927 return ap->ops->scr_write(ap, reg, val);
34bf2170
TH
5928 return -EOPNOTSUPP;
5929}
5930
5931/**
5932 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 5933 * @link: ATA link to write SCR for
34bf2170
TH
5934 * @reg: SCR to write
5935 * @val: value to write
5936 *
5937 * This function is identical to sata_scr_write() except that this
5938 * function performs flush after writing to the register.
5939 *
5940 * LOCKING:
5941 * None.
5942 *
5943 * RETURNS:
5944 * 0 on success, negative errno on failure.
5945 */
936fd732 5946int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 5947{
936fd732 5948 struct ata_port *ap = link->ap;
da3dbb17
TH
5949 int rc;
5950
936fd732 5951 if (sata_scr_valid(link)) {
da3dbb17
TH
5952 rc = ap->ops->scr_write(ap, reg, val);
5953 if (rc == 0)
5954 rc = ap->ops->scr_read(ap, reg, &val);
5955 return rc;
34bf2170
TH
5956 }
5957 return -EOPNOTSUPP;
5958}
5959
5960/**
936fd732
TH
5961 * ata_link_online - test whether the given link is online
5962 * @link: ATA link to test
34bf2170 5963 *
936fd732
TH
5964 * Test whether @link is online. Note that this function returns
5965 * 0 if online status of @link cannot be obtained, so
5966 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5967 *
5968 * LOCKING:
5969 * None.
5970 *
5971 * RETURNS:
5972 * 1 if the port online status is available and online.
5973 */
936fd732 5974int ata_link_online(struct ata_link *link)
34bf2170
TH
5975{
5976 u32 sstatus;
5977
936fd732
TH
5978 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5979 (sstatus & 0xf) == 0x3)
34bf2170
TH
5980 return 1;
5981 return 0;
5982}
5983
5984/**
936fd732
TH
5985 * ata_link_offline - test whether the given link is offline
5986 * @link: ATA link to test
34bf2170 5987 *
936fd732
TH
5988 * Test whether @link is offline. Note that this function
5989 * returns 0 if offline status of @link cannot be obtained, so
5990 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5991 *
5992 * LOCKING:
5993 * None.
5994 *
5995 * RETURNS:
5996 * 1 if the port offline status is available and offline.
5997 */
936fd732 5998int ata_link_offline(struct ata_link *link)
34bf2170
TH
5999{
6000 u32 sstatus;
6001
936fd732
TH
6002 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6003 (sstatus & 0xf) != 0x3)
34bf2170
TH
6004 return 1;
6005 return 0;
6006}
0baab86b 6007
77b08fb5 6008int ata_flush_cache(struct ata_device *dev)
9b847548 6009{
977e6b9f 6010 unsigned int err_mask;
9b847548
JA
6011 u8 cmd;
6012
6013 if (!ata_try_flush_cache(dev))
6014 return 0;
6015
6fc49adb 6016 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6017 cmd = ATA_CMD_FLUSH_EXT;
6018 else
6019 cmd = ATA_CMD_FLUSH;
6020
4f34337b
AC
6021 /* This is wrong. On a failed flush we get back the LBA of the lost
6022 sector and we should (assuming it wasn't aborted as unknown) issue
6023 a further flush command to continue the writeback until it
6024 does not error */
977e6b9f
TH
6025 err_mask = ata_do_simple_cmd(dev, cmd);
6026 if (err_mask) {
6027 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6028 return -EIO;
6029 }
6030
6031 return 0;
9b847548
JA
6032}
6033
6ffa01d8 6034#ifdef CONFIG_PM
cca3974e
JG
6035static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6036 unsigned int action, unsigned int ehi_flags,
6037 int wait)
500530f6
TH
6038{
6039 unsigned long flags;
6040 int i, rc;
6041
cca3974e
JG
6042 for (i = 0; i < host->n_ports; i++) {
6043 struct ata_port *ap = host->ports[i];
e3667ebf 6044 struct ata_link *link;
500530f6
TH
6045
6046 /* Previous resume operation might still be in
6047 * progress. Wait for PM_PENDING to clear.
6048 */
6049 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6050 ata_port_wait_eh(ap);
6051 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6052 }
6053
6054 /* request PM ops to EH */
6055 spin_lock_irqsave(ap->lock, flags);
6056
6057 ap->pm_mesg = mesg;
6058 if (wait) {
6059 rc = 0;
6060 ap->pm_result = &rc;
6061 }
6062
6063 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6064 __ata_port_for_each_link(link, ap) {
6065 link->eh_info.action |= action;
6066 link->eh_info.flags |= ehi_flags;
6067 }
500530f6
TH
6068
6069 ata_port_schedule_eh(ap);
6070
6071 spin_unlock_irqrestore(ap->lock, flags);
6072
6073 /* wait and check result */
6074 if (wait) {
6075 ata_port_wait_eh(ap);
6076 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6077 if (rc)
6078 return rc;
6079 }
6080 }
6081
6082 return 0;
6083}
6084
6085/**
cca3974e
JG
6086 * ata_host_suspend - suspend host
6087 * @host: host to suspend
500530f6
TH
6088 * @mesg: PM message
6089 *
cca3974e 6090 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6091 * function requests EH to perform PM operations and waits for EH
6092 * to finish.
6093 *
6094 * LOCKING:
6095 * Kernel thread context (may sleep).
6096 *
6097 * RETURNS:
6098 * 0 on success, -errno on failure.
6099 */
cca3974e 6100int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6101{
9666f400 6102 int rc;
500530f6 6103
cca3974e 6104 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6105 if (rc == 0)
6106 host->dev->power.power_state = mesg;
500530f6
TH
6107 return rc;
6108}
6109
6110/**
cca3974e
JG
6111 * ata_host_resume - resume host
6112 * @host: host to resume
500530f6 6113 *
cca3974e 6114 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6115 * function requests EH to perform PM operations and returns.
6116 * Note that all resume operations are performed parallely.
6117 *
6118 * LOCKING:
6119 * Kernel thread context (may sleep).
6120 */
cca3974e 6121void ata_host_resume(struct ata_host *host)
500530f6 6122{
cca3974e
JG
6123 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6124 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6125 host->dev->power.power_state = PMSG_ON;
500530f6 6126}
6ffa01d8 6127#endif
500530f6 6128
c893a3ae
RD
6129/**
6130 * ata_port_start - Set port up for dma.
6131 * @ap: Port to initialize
6132 *
6133 * Called just after data structures for each port are
6134 * initialized. Allocates space for PRD table.
6135 *
6136 * May be used as the port_start() entry in ata_port_operations.
6137 *
6138 * LOCKING:
6139 * Inherited from caller.
6140 */
f0d36efd 6141int ata_port_start(struct ata_port *ap)
1da177e4 6142{
2f1f610b 6143 struct device *dev = ap->dev;
6037d6bb 6144 int rc;
1da177e4 6145
f0d36efd
TH
6146 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6147 GFP_KERNEL);
1da177e4
LT
6148 if (!ap->prd)
6149 return -ENOMEM;
6150
6037d6bb 6151 rc = ata_pad_alloc(ap, dev);
f0d36efd 6152 if (rc)
6037d6bb 6153 return rc;
1da177e4 6154
f0d36efd
TH
6155 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6156 (unsigned long long)ap->prd_dma);
1da177e4
LT
6157 return 0;
6158}
6159
3ef3b43d
TH
6160/**
6161 * ata_dev_init - Initialize an ata_device structure
6162 * @dev: Device structure to initialize
6163 *
6164 * Initialize @dev in preparation for probing.
6165 *
6166 * LOCKING:
6167 * Inherited from caller.
6168 */
6169void ata_dev_init(struct ata_device *dev)
6170{
9af5c9c9
TH
6171 struct ata_link *link = dev->link;
6172 struct ata_port *ap = link->ap;
72fa4b74
TH
6173 unsigned long flags;
6174
5a04bf4b 6175 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6176 link->sata_spd_limit = link->hw_sata_spd_limit;
6177 link->sata_spd = 0;
5a04bf4b 6178
72fa4b74
TH
6179 /* High bits of dev->flags are used to record warm plug
6180 * requests which occur asynchronously. Synchronize using
cca3974e 6181 * host lock.
72fa4b74 6182 */
ba6a1308 6183 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6184 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6185 dev->horkage = 0;
ba6a1308 6186 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6187
72fa4b74
TH
6188 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6189 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6190 dev->pio_mask = UINT_MAX;
6191 dev->mwdma_mask = UINT_MAX;
6192 dev->udma_mask = UINT_MAX;
6193}
6194
4fb37a25
TH
6195/**
6196 * ata_link_init - Initialize an ata_link structure
6197 * @ap: ATA port link is attached to
6198 * @link: Link structure to initialize
8989805d 6199 * @pmp: Port multiplier port number
4fb37a25
TH
6200 *
6201 * Initialize @link.
6202 *
6203 * LOCKING:
6204 * Kernel thread context (may sleep)
6205 */
8989805d 6206static void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6207{
6208 int i;
6209
6210 /* clear everything except for devices */
6211 memset(link, 0, offsetof(struct ata_link, device[0]));
6212
6213 link->ap = ap;
8989805d 6214 link->pmp = pmp;
4fb37a25
TH
6215 link->active_tag = ATA_TAG_POISON;
6216 link->hw_sata_spd_limit = UINT_MAX;
6217
6218 /* can't use iterator, ap isn't initialized yet */
6219 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6220 struct ata_device *dev = &link->device[i];
6221
6222 dev->link = link;
6223 dev->devno = dev - link->device;
6224 ata_dev_init(dev);
6225 }
6226}
6227
6228/**
6229 * sata_link_init_spd - Initialize link->sata_spd_limit
6230 * @link: Link to configure sata_spd_limit for
6231 *
6232 * Initialize @link->[hw_]sata_spd_limit to the currently
6233 * configured value.
6234 *
6235 * LOCKING:
6236 * Kernel thread context (may sleep).
6237 *
6238 * RETURNS:
6239 * 0 on success, -errno on failure.
6240 */
6241static int sata_link_init_spd(struct ata_link *link)
6242{
6243 u32 scontrol, spd;
6244 int rc;
6245
6246 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6247 if (rc)
6248 return rc;
6249
6250 spd = (scontrol >> 4) & 0xf;
6251 if (spd)
6252 link->hw_sata_spd_limit &= (1 << spd) - 1;
6253
6254 link->sata_spd_limit = link->hw_sata_spd_limit;
6255
6256 return 0;
6257}
6258
1da177e4 6259/**
f3187195
TH
6260 * ata_port_alloc - allocate and initialize basic ATA port resources
6261 * @host: ATA host this allocated port belongs to
1da177e4 6262 *
f3187195
TH
6263 * Allocate and initialize basic ATA port resources.
6264 *
6265 * RETURNS:
6266 * Allocate ATA port on success, NULL on failure.
0cba632b 6267 *
1da177e4 6268 * LOCKING:
f3187195 6269 * Inherited from calling layer (may sleep).
1da177e4 6270 */
f3187195 6271struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6272{
f3187195 6273 struct ata_port *ap;
1da177e4 6274
f3187195
TH
6275 DPRINTK("ENTER\n");
6276
6277 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6278 if (!ap)
6279 return NULL;
6280
f4d6d004 6281 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6282 ap->lock = &host->lock;
198e0fed 6283 ap->flags = ATA_FLAG_DISABLED;
f3187195 6284 ap->print_id = -1;
1da177e4 6285 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6286 ap->host = host;
f3187195 6287 ap->dev = host->dev;
1da177e4 6288 ap->last_ctl = 0xFF;
bd5d825c
BP
6289
6290#if defined(ATA_VERBOSE_DEBUG)
6291 /* turn on all debugging levels */
6292 ap->msg_enable = 0x00FF;
6293#elif defined(ATA_DEBUG)
6294 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6295#else
0dd4b21f 6296 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6297#endif
1da177e4 6298
65f27f38
DH
6299 INIT_DELAYED_WORK(&ap->port_task, NULL);
6300 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6301 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6302 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6303 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6304 init_timer_deferrable(&ap->fastdrain_timer);
6305 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6306 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6307
838df628 6308 ap->cbl = ATA_CBL_NONE;
838df628 6309
8989805d 6310 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6311
6312#ifdef ATA_IRQ_TRAP
6313 ap->stats.unhandled_irq = 1;
6314 ap->stats.idle_irq = 1;
6315#endif
1da177e4 6316 return ap;
1da177e4
LT
6317}
6318
f0d36efd
TH
6319static void ata_host_release(struct device *gendev, void *res)
6320{
6321 struct ata_host *host = dev_get_drvdata(gendev);
6322 int i;
6323
6324 for (i = 0; i < host->n_ports; i++) {
6325 struct ata_port *ap = host->ports[i];
6326
ecef7253
TH
6327 if (!ap)
6328 continue;
6329
6330 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
f0d36efd 6331 ap->ops->port_stop(ap);
f0d36efd
TH
6332 }
6333
ecef7253 6334 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
f0d36efd 6335 host->ops->host_stop(host);
1aa56cca 6336
1aa506e4
TH
6337 for (i = 0; i < host->n_ports; i++) {
6338 struct ata_port *ap = host->ports[i];
6339
4911487a
TH
6340 if (!ap)
6341 continue;
6342
6343 if (ap->scsi_host)
1aa506e4
TH
6344 scsi_host_put(ap->scsi_host);
6345
4911487a 6346 kfree(ap);
1aa506e4
TH
6347 host->ports[i] = NULL;
6348 }
6349
1aa56cca 6350 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6351}
6352
f3187195
TH
6353/**
6354 * ata_host_alloc - allocate and init basic ATA host resources
6355 * @dev: generic device this host is associated with
6356 * @max_ports: maximum number of ATA ports associated with this host
6357 *
6358 * Allocate and initialize basic ATA host resources. LLD calls
6359 * this function to allocate a host, initializes it fully and
6360 * attaches it using ata_host_register().
6361 *
6362 * @max_ports ports are allocated and host->n_ports is
6363 * initialized to @max_ports. The caller is allowed to decrease
6364 * host->n_ports before calling ata_host_register(). The unused
6365 * ports will be automatically freed on registration.
6366 *
6367 * RETURNS:
6368 * Allocate ATA host on success, NULL on failure.
6369 *
6370 * LOCKING:
6371 * Inherited from calling layer (may sleep).
6372 */
6373struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6374{
6375 struct ata_host *host;
6376 size_t sz;
6377 int i;
6378
6379 DPRINTK("ENTER\n");
6380
6381 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6382 return NULL;
6383
6384 /* alloc a container for our list of ATA ports (buses) */
6385 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6386 /* alloc a container for our list of ATA ports (buses) */
6387 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6388 if (!host)
6389 goto err_out;
6390
6391 devres_add(dev, host);
6392 dev_set_drvdata(dev, host);
6393
6394 spin_lock_init(&host->lock);
6395 host->dev = dev;
6396 host->n_ports = max_ports;
6397
6398 /* allocate ports bound to this host */
6399 for (i = 0; i < max_ports; i++) {
6400 struct ata_port *ap;
6401
6402 ap = ata_port_alloc(host);
6403 if (!ap)
6404 goto err_out;
6405
6406 ap->port_no = i;
6407 host->ports[i] = ap;
6408 }
6409
6410 devres_remove_group(dev, NULL);
6411 return host;
6412
6413 err_out:
6414 devres_release_group(dev, NULL);
6415 return NULL;
6416}
6417
f5cda257
TH
6418/**
6419 * ata_host_alloc_pinfo - alloc host and init with port_info array
6420 * @dev: generic device this host is associated with
6421 * @ppi: array of ATA port_info to initialize host with
6422 * @n_ports: number of ATA ports attached to this host
6423 *
6424 * Allocate ATA host and initialize with info from @ppi. If NULL
6425 * terminated, @ppi may contain fewer entries than @n_ports. The
6426 * last entry will be used for the remaining ports.
6427 *
6428 * RETURNS:
6429 * Allocate ATA host on success, NULL on failure.
6430 *
6431 * LOCKING:
6432 * Inherited from calling layer (may sleep).
6433 */
6434struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6435 const struct ata_port_info * const * ppi,
6436 int n_ports)
6437{
6438 const struct ata_port_info *pi;
6439 struct ata_host *host;
6440 int i, j;
6441
6442 host = ata_host_alloc(dev, n_ports);
6443 if (!host)
6444 return NULL;
6445
6446 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6447 struct ata_port *ap = host->ports[i];
6448
6449 if (ppi[j])
6450 pi = ppi[j++];
6451
6452 ap->pio_mask = pi->pio_mask;
6453 ap->mwdma_mask = pi->mwdma_mask;
6454 ap->udma_mask = pi->udma_mask;
6455 ap->flags |= pi->flags;
0c88758b 6456 ap->link.flags |= pi->link_flags;
f5cda257
TH
6457 ap->ops = pi->port_ops;
6458
6459 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6460 host->ops = pi->port_ops;
6461 if (!host->private_data && pi->private_data)
6462 host->private_data = pi->private_data;
6463 }
6464
6465 return host;
6466}
6467
ecef7253
TH
6468/**
6469 * ata_host_start - start and freeze ports of an ATA host
6470 * @host: ATA host to start ports for
6471 *
6472 * Start and then freeze ports of @host. Started status is
6473 * recorded in host->flags, so this function can be called
6474 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6475 * once. If host->ops isn't initialized yet, its set to the
6476 * first non-dummy port ops.
ecef7253
TH
6477 *
6478 * LOCKING:
6479 * Inherited from calling layer (may sleep).
6480 *
6481 * RETURNS:
6482 * 0 if all ports are started successfully, -errno otherwise.
6483 */
6484int ata_host_start(struct ata_host *host)
6485{
6486 int i, rc;
6487
6488 if (host->flags & ATA_HOST_STARTED)
6489 return 0;
6490
6491 for (i = 0; i < host->n_ports; i++) {
6492 struct ata_port *ap = host->ports[i];
6493
f3187195
TH
6494 if (!host->ops && !ata_port_is_dummy(ap))
6495 host->ops = ap->ops;
6496
ecef7253
TH
6497 if (ap->ops->port_start) {
6498 rc = ap->ops->port_start(ap);
6499 if (rc) {
6500 ata_port_printk(ap, KERN_ERR, "failed to "
6501 "start port (errno=%d)\n", rc);
6502 goto err_out;
6503 }
6504 }
6505
6506 ata_eh_freeze_port(ap);
6507 }
6508
6509 host->flags |= ATA_HOST_STARTED;
6510 return 0;
6511
6512 err_out:
6513 while (--i >= 0) {
6514 struct ata_port *ap = host->ports[i];
6515
6516 if (ap->ops->port_stop)
6517 ap->ops->port_stop(ap);
6518 }
6519 return rc;
6520}
6521
b03732f0 6522/**
cca3974e
JG
6523 * ata_sas_host_init - Initialize a host struct
6524 * @host: host to initialize
6525 * @dev: device host is attached to
6526 * @flags: host flags
6527 * @ops: port_ops
b03732f0
BK
6528 *
6529 * LOCKING:
6530 * PCI/etc. bus probe sem.
6531 *
6532 */
f3187195 6533/* KILLME - the only user left is ipr */
cca3974e
JG
6534void ata_host_init(struct ata_host *host, struct device *dev,
6535 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6536{
cca3974e
JG
6537 spin_lock_init(&host->lock);
6538 host->dev = dev;
6539 host->flags = flags;
6540 host->ops = ops;
b03732f0
BK
6541}
6542
f3187195
TH
6543/**
6544 * ata_host_register - register initialized ATA host
6545 * @host: ATA host to register
6546 * @sht: template for SCSI host
6547 *
6548 * Register initialized ATA host. @host is allocated using
6549 * ata_host_alloc() and fully initialized by LLD. This function
6550 * starts ports, registers @host with ATA and SCSI layers and
6551 * probe registered devices.
6552 *
6553 * LOCKING:
6554 * Inherited from calling layer (may sleep).
6555 *
6556 * RETURNS:
6557 * 0 on success, -errno otherwise.
6558 */
6559int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6560{
6561 int i, rc;
6562
6563 /* host must have been started */
6564 if (!(host->flags & ATA_HOST_STARTED)) {
6565 dev_printk(KERN_ERR, host->dev,
6566 "BUG: trying to register unstarted host\n");
6567 WARN_ON(1);
6568 return -EINVAL;
6569 }
6570
6571 /* Blow away unused ports. This happens when LLD can't
6572 * determine the exact number of ports to allocate at
6573 * allocation time.
6574 */
6575 for (i = host->n_ports; host->ports[i]; i++)
6576 kfree(host->ports[i]);
6577
6578 /* give ports names and add SCSI hosts */
6579 for (i = 0; i < host->n_ports; i++)
6580 host->ports[i]->print_id = ata_print_id++;
6581
6582 rc = ata_scsi_add_hosts(host, sht);
6583 if (rc)
6584 return rc;
6585
fafbae87
TH
6586 /* associate with ACPI nodes */
6587 ata_acpi_associate(host);
6588
f3187195
TH
6589 /* set cable, sata_spd_limit and report */
6590 for (i = 0; i < host->n_ports; i++) {
6591 struct ata_port *ap = host->ports[i];
f3187195
TH
6592 unsigned long xfer_mask;
6593
6594 /* set SATA cable type if still unset */
6595 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6596 ap->cbl = ATA_CBL_SATA;
6597
6598 /* init sata_spd_limit to the current value */
4fb37a25 6599 sata_link_init_spd(&ap->link);
f3187195 6600
cbcdd875 6601 /* print per-port info to dmesg */
f3187195
TH
6602 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6603 ap->udma_mask);
6604
f3187195 6605 if (!ata_port_is_dummy(ap))
cbcdd875
TH
6606 ata_port_printk(ap, KERN_INFO,
6607 "%cATA max %s %s\n",
a16abc0b 6608 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 6609 ata_mode_string(xfer_mask),
cbcdd875 6610 ap->link.eh_info.desc);
f3187195
TH
6611 else
6612 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6613 }
6614
6615 /* perform each probe synchronously */
6616 DPRINTK("probe begin\n");
6617 for (i = 0; i < host->n_ports; i++) {
6618 struct ata_port *ap = host->ports[i];
6619 int rc;
6620
6621 /* probe */
6622 if (ap->ops->error_handler) {
9af5c9c9 6623 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
6624 unsigned long flags;
6625
6626 ata_port_probe(ap);
6627
6628 /* kick EH for boot probing */
6629 spin_lock_irqsave(ap->lock, flags);
6630
f58229f8
TH
6631 ehi->probe_mask =
6632 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
6633 ehi->action |= ATA_EH_SOFTRESET;
6634 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6635
f4d6d004 6636 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
6637 ap->pflags |= ATA_PFLAG_LOADING;
6638 ata_port_schedule_eh(ap);
6639
6640 spin_unlock_irqrestore(ap->lock, flags);
6641
6642 /* wait for EH to finish */
6643 ata_port_wait_eh(ap);
6644 } else {
6645 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6646 rc = ata_bus_probe(ap);
6647 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6648
6649 if (rc) {
6650 /* FIXME: do something useful here?
6651 * Current libata behavior will
6652 * tear down everything when
6653 * the module is removed
6654 * or the h/w is unplugged.
6655 */
6656 }
6657 }
6658 }
6659
6660 /* probes are done, now scan each port's disk(s) */
6661 DPRINTK("host probe begin\n");
6662 for (i = 0; i < host->n_ports; i++) {
6663 struct ata_port *ap = host->ports[i];
6664
1ae46317 6665 ata_scsi_scan_host(ap, 1);
f3187195
TH
6666 }
6667
6668 return 0;
6669}
6670
f5cda257
TH
6671/**
6672 * ata_host_activate - start host, request IRQ and register it
6673 * @host: target ATA host
6674 * @irq: IRQ to request
6675 * @irq_handler: irq_handler used when requesting IRQ
6676 * @irq_flags: irq_flags used when requesting IRQ
6677 * @sht: scsi_host_template to use when registering the host
6678 *
6679 * After allocating an ATA host and initializing it, most libata
6680 * LLDs perform three steps to activate the host - start host,
6681 * request IRQ and register it. This helper takes necessasry
6682 * arguments and performs the three steps in one go.
6683 *
6684 * LOCKING:
6685 * Inherited from calling layer (may sleep).
6686 *
6687 * RETURNS:
6688 * 0 on success, -errno otherwise.
6689 */
6690int ata_host_activate(struct ata_host *host, int irq,
6691 irq_handler_t irq_handler, unsigned long irq_flags,
6692 struct scsi_host_template *sht)
6693{
cbcdd875 6694 int i, rc;
f5cda257
TH
6695
6696 rc = ata_host_start(host);
6697 if (rc)
6698 return rc;
6699
6700 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6701 dev_driver_string(host->dev), host);
6702 if (rc)
6703 return rc;
6704
cbcdd875
TH
6705 for (i = 0; i < host->n_ports; i++)
6706 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 6707
f5cda257
TH
6708 rc = ata_host_register(host, sht);
6709 /* if failed, just free the IRQ and leave ports alone */
6710 if (rc)
6711 devm_free_irq(host->dev, irq, host);
6712
6713 return rc;
6714}
6715
720ba126
TH
6716/**
6717 * ata_port_detach - Detach ATA port in prepration of device removal
6718 * @ap: ATA port to be detached
6719 *
6720 * Detach all ATA devices and the associated SCSI devices of @ap;
6721 * then, remove the associated SCSI host. @ap is guaranteed to
6722 * be quiescent on return from this function.
6723 *
6724 * LOCKING:
6725 * Kernel thread context (may sleep).
6726 */
6727void ata_port_detach(struct ata_port *ap)
6728{
6729 unsigned long flags;
41bda9c9 6730 struct ata_link *link;
f58229f8 6731 struct ata_device *dev;
720ba126
TH
6732
6733 if (!ap->ops->error_handler)
c3cf30a9 6734 goto skip_eh;
720ba126
TH
6735
6736 /* tell EH we're leaving & flush EH */
ba6a1308 6737 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 6738 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 6739 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6740
6741 ata_port_wait_eh(ap);
6742
6743 /* EH is now guaranteed to see UNLOADING, so no new device
6744 * will be attached. Disable all existing devices.
6745 */
ba6a1308 6746 spin_lock_irqsave(ap->lock, flags);
720ba126 6747
41bda9c9
TH
6748 ata_port_for_each_link(link, ap) {
6749 ata_link_for_each_dev(dev, link)
6750 ata_dev_disable(dev);
6751 }
720ba126 6752
ba6a1308 6753 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6754
6755 /* Final freeze & EH. All in-flight commands are aborted. EH
6756 * will be skipped and retrials will be terminated with bad
6757 * target.
6758 */
ba6a1308 6759 spin_lock_irqsave(ap->lock, flags);
720ba126 6760 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 6761 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6762
6763 ata_port_wait_eh(ap);
45a66c1c 6764 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 6765
c3cf30a9 6766 skip_eh:
720ba126 6767 /* remove the associated SCSI host */
cca3974e 6768 scsi_remove_host(ap->scsi_host);
720ba126
TH
6769}
6770
0529c159
TH
6771/**
6772 * ata_host_detach - Detach all ports of an ATA host
6773 * @host: Host to detach
6774 *
6775 * Detach all ports of @host.
6776 *
6777 * LOCKING:
6778 * Kernel thread context (may sleep).
6779 */
6780void ata_host_detach(struct ata_host *host)
6781{
6782 int i;
6783
6784 for (i = 0; i < host->n_ports; i++)
6785 ata_port_detach(host->ports[i]);
6786}
6787
1da177e4
LT
6788/**
6789 * ata_std_ports - initialize ioaddr with standard port offsets.
6790 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6791 *
6792 * Utility function which initializes data_addr, error_addr,
6793 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6794 * device_addr, status_addr, and command_addr to standard offsets
6795 * relative to cmd_addr.
6796 *
6797 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6798 */
0baab86b 6799
1da177e4
LT
6800void ata_std_ports(struct ata_ioports *ioaddr)
6801{
6802 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6803 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6804 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6805 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6806 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6807 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6808 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6809 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6810 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6811 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6812}
6813
0baab86b 6814
374b1873
JG
6815#ifdef CONFIG_PCI
6816
1da177e4
LT
6817/**
6818 * ata_pci_remove_one - PCI layer callback for device removal
6819 * @pdev: PCI device that was removed
6820 *
b878ca5d
TH
6821 * PCI layer indicates to libata via this hook that hot-unplug or
6822 * module unload event has occurred. Detach all ports. Resource
6823 * release is handled via devres.
1da177e4
LT
6824 *
6825 * LOCKING:
6826 * Inherited from PCI layer (may sleep).
6827 */
f0d36efd 6828void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
6829{
6830 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6831 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6832
b878ca5d 6833 ata_host_detach(host);
1da177e4
LT
6834}
6835
6836/* move to PCI subsystem */
057ace5e 6837int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6838{
6839 unsigned long tmp = 0;
6840
6841 switch (bits->width) {
6842 case 1: {
6843 u8 tmp8 = 0;
6844 pci_read_config_byte(pdev, bits->reg, &tmp8);
6845 tmp = tmp8;
6846 break;
6847 }
6848 case 2: {
6849 u16 tmp16 = 0;
6850 pci_read_config_word(pdev, bits->reg, &tmp16);
6851 tmp = tmp16;
6852 break;
6853 }
6854 case 4: {
6855 u32 tmp32 = 0;
6856 pci_read_config_dword(pdev, bits->reg, &tmp32);
6857 tmp = tmp32;
6858 break;
6859 }
6860
6861 default:
6862 return -EINVAL;
6863 }
6864
6865 tmp &= bits->mask;
6866
6867 return (tmp == bits->val) ? 1 : 0;
6868}
9b847548 6869
6ffa01d8 6870#ifdef CONFIG_PM
3c5100c1 6871void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6872{
6873 pci_save_state(pdev);
4c90d971 6874 pci_disable_device(pdev);
500530f6 6875
4c90d971 6876 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6877 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6878}
6879
553c4aa6 6880int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6881{
553c4aa6
TH
6882 int rc;
6883
9b847548
JA
6884 pci_set_power_state(pdev, PCI_D0);
6885 pci_restore_state(pdev);
553c4aa6 6886
b878ca5d 6887 rc = pcim_enable_device(pdev);
553c4aa6
TH
6888 if (rc) {
6889 dev_printk(KERN_ERR, &pdev->dev,
6890 "failed to enable device after resume (%d)\n", rc);
6891 return rc;
6892 }
6893
9b847548 6894 pci_set_master(pdev);
553c4aa6 6895 return 0;
500530f6
TH
6896}
6897
3c5100c1 6898int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6899{
cca3974e 6900 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6901 int rc = 0;
6902
cca3974e 6903 rc = ata_host_suspend(host, mesg);
500530f6
TH
6904 if (rc)
6905 return rc;
6906
3c5100c1 6907 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6908
6909 return 0;
6910}
6911
6912int ata_pci_device_resume(struct pci_dev *pdev)
6913{
cca3974e 6914 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6915 int rc;
500530f6 6916
553c4aa6
TH
6917 rc = ata_pci_device_do_resume(pdev);
6918 if (rc == 0)
6919 ata_host_resume(host);
6920 return rc;
9b847548 6921}
6ffa01d8
TH
6922#endif /* CONFIG_PM */
6923
1da177e4
LT
6924#endif /* CONFIG_PCI */
6925
6926
1da177e4
LT
6927static int __init ata_init(void)
6928{
a8601e5f 6929 ata_probe_timeout *= HZ;
1da177e4
LT
6930 ata_wq = create_workqueue("ata");
6931 if (!ata_wq)
6932 return -ENOMEM;
6933
453b07ac
TH
6934 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6935 if (!ata_aux_wq) {
6936 destroy_workqueue(ata_wq);
6937 return -ENOMEM;
6938 }
6939
1da177e4
LT
6940 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6941 return 0;
6942}
6943
6944static void __exit ata_exit(void)
6945{
6946 destroy_workqueue(ata_wq);
453b07ac 6947 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6948}
6949
a4625085 6950subsys_initcall(ata_init);
1da177e4
LT
6951module_exit(ata_exit);
6952
67846b30 6953static unsigned long ratelimit_time;
34af946a 6954static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6955
6956int ata_ratelimit(void)
6957{
6958 int rc;
6959 unsigned long flags;
6960
6961 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6962
6963 if (time_after(jiffies, ratelimit_time)) {
6964 rc = 1;
6965 ratelimit_time = jiffies + (HZ/5);
6966 } else
6967 rc = 0;
6968
6969 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6970
6971 return rc;
6972}
6973
c22daff4
TH
6974/**
6975 * ata_wait_register - wait until register value changes
6976 * @reg: IO-mapped register
6977 * @mask: Mask to apply to read register value
6978 * @val: Wait condition
6979 * @interval_msec: polling interval in milliseconds
6980 * @timeout_msec: timeout in milliseconds
6981 *
6982 * Waiting for some bits of register to change is a common
6983 * operation for ATA controllers. This function reads 32bit LE
6984 * IO-mapped register @reg and tests for the following condition.
6985 *
6986 * (*@reg & mask) != val
6987 *
6988 * If the condition is met, it returns; otherwise, the process is
6989 * repeated after @interval_msec until timeout.
6990 *
6991 * LOCKING:
6992 * Kernel thread context (may sleep)
6993 *
6994 * RETURNS:
6995 * The final register value.
6996 */
6997u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6998 unsigned long interval_msec,
6999 unsigned long timeout_msec)
7000{
7001 unsigned long timeout;
7002 u32 tmp;
7003
7004 tmp = ioread32(reg);
7005
7006 /* Calculate timeout _after_ the first read to make sure
7007 * preceding writes reach the controller before starting to
7008 * eat away the timeout.
7009 */
7010 timeout = jiffies + (timeout_msec * HZ) / 1000;
7011
7012 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7013 msleep(interval_msec);
7014 tmp = ioread32(reg);
7015 }
7016
7017 return tmp;
7018}
7019
dd5b06c4
TH
7020/*
7021 * Dummy port_ops
7022 */
7023static void ata_dummy_noret(struct ata_port *ap) { }
7024static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7025static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7026
7027static u8 ata_dummy_check_status(struct ata_port *ap)
7028{
7029 return ATA_DRDY;
7030}
7031
7032static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7033{
7034 return AC_ERR_SYSTEM;
7035}
7036
/*
 * A fully inert set of port operations: status reads report a ready
 * device, qc_issue always fails with AC_ERR_SYSTEM, and every other
 * hook is a no-op (port_start trivially succeeds).
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
7051
/* Port info wrapping ata_dummy_port_ops; all other fields left zero. */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
7055
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* SATA debounce timing tables and the dummy port placeholders. */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);

/* Host init / allocation / registration / teardown. */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);

/* Command setup, issue and completion. */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);

/* Taskfile and register access helpers. */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);

/* BMDMA helpers. */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/* Probe, link management and reset. */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);

/* SCSI midlayer glue. */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);

/* SCR register access and link state queries. */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */

/* IDENTIFY data and mode/timing helpers. */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
/* PCI-specific setup/teardown helpers. */
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

/* Error handling. */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

/* Cable type detection helpers. */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);