]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/ata/libata-core.c
libata-pmp-prep: make a number of functions global to libata
[mirror_ubuntu-eoan-kernel.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
9f45cbd3 71static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
3373efd8 72static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 73static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 74
f3187195 75unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
baf4fdfa
ML
88int atapi_passthru16 = 1;
89module_param(atapi_passthru16, int, 0444);
90MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
91
c3c013a2
JG
92int libata_fua = 0;
93module_param_named(fua, libata_fua, int, 0444);
94MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
95
1e999736
AC
96static int ata_ignore_hpa = 0;
97module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
98MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
99
a8601e5f
AM
100static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
101module_param(ata_probe_timeout, int, 0444);
102MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
103
d7d0dad6
JG
104int libata_noacpi = 1;
105module_param_named(noacpi, libata_noacpi, int, 0444);
11ef697b
KCA
106MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
107
1da177e4
LT
108MODULE_AUTHOR("Jeff Garzik");
109MODULE_DESCRIPTION("Library module for ATA devices");
110MODULE_LICENSE("GPL");
111MODULE_VERSION(DRV_VERSION);
112
0baab86b 113
1da177e4
LT
114/**
115 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
116 * @tf: Taskfile to convert
1da177e4 117 * @pmp: Port multiplier port
9977126c
TH
118 * @is_cmd: This FIS is for command
119 * @fis: Buffer into which data will output
1da177e4
LT
120 *
121 * Converts a standard ATA taskfile to a Serial ATA
122 * FIS structure (Register - Host to Device).
123 *
124 * LOCKING:
125 * Inherited from caller.
126 */
9977126c 127void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 128{
9977126c
TH
129 fis[0] = 0x27; /* Register - Host to Device FIS */
130 fis[1] = pmp & 0xf; /* Port multiplier number*/
131 if (is_cmd)
132 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
133
1da177e4
LT
134 fis[2] = tf->command;
135 fis[3] = tf->feature;
136
137 fis[4] = tf->lbal;
138 fis[5] = tf->lbam;
139 fis[6] = tf->lbah;
140 fis[7] = tf->device;
141
142 fis[8] = tf->hob_lbal;
143 fis[9] = tf->hob_lbam;
144 fis[10] = tf->hob_lbah;
145 fis[11] = tf->hob_feature;
146
147 fis[12] = tf->nsect;
148 fis[13] = tf->hob_nsect;
149 fis[14] = 0;
150 fis[15] = tf->ctl;
151
152 fis[16] = 0;
153 fis[17] = 0;
154 fis[18] = 0;
155 fis[19] = 0;
156}
157
158/**
159 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
160 * @fis: Buffer from which data will be input
161 * @tf: Taskfile to output
162 *
e12a1be6 163 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
164 *
165 * LOCKING:
166 * Inherited from caller.
167 */
168
057ace5e 169void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
170{
171 tf->command = fis[2]; /* status */
172 tf->feature = fis[3]; /* error */
173
174 tf->lbal = fis[4];
175 tf->lbam = fis[5];
176 tf->lbah = fis[6];
177 tf->device = fis[7];
178
179 tf->hob_lbal = fis[8];
180 tf->hob_lbam = fis[9];
181 tf->hob_lbah = fis[10];
182
183 tf->nsect = fis[12];
184 tf->hob_nsect = fis[13];
185}
186
/*
 * Read/write command opcode table used by ata_rwcmd_protocol().
 * Indexed by (base + fua + lba48 + write) where base selects the
 * transfer class (0 = PIO multi, 8 = PIO, 16 = DMA), fua adds 4,
 * lba48 adds 2 and write adds 1.  A zero entry means that
 * combination has no valid command (e.g. FUA without LBA48).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
216
217/**
8cbd6df1 218 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
219 * @tf: command to examine and configure
220 * @dev: device tf belongs to
1da177e4 221 *
2e9edbf8 222 * Examine the device configuration and tf->flags to calculate
8cbd6df1 223 * the proper read/write commands and protocol to use.
1da177e4
LT
224 *
225 * LOCKING:
226 * caller.
227 */
bd056d7e 228static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 229{
9a3dccc4 230 u8 cmd;
1da177e4 231
9a3dccc4 232 int index, fua, lba48, write;
2e9edbf8 233
9a3dccc4 234 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
235 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
236 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 237
8cbd6df1
AL
238 if (dev->flags & ATA_DFLAG_PIO) {
239 tf->protocol = ATA_PROT_PIO;
9a3dccc4 240 index = dev->multi_count ? 0 : 8;
9af5c9c9 241 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
242 /* Unable to use DMA due to host limitation */
243 tf->protocol = ATA_PROT_PIO;
0565c26d 244 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
245 } else {
246 tf->protocol = ATA_PROT_DMA;
9a3dccc4 247 index = 16;
8cbd6df1 248 }
1da177e4 249
9a3dccc4
TH
250 cmd = ata_rw_cmds[index + fua + lba48 + write];
251 if (cmd) {
252 tf->command = cmd;
253 return 0;
254 }
255 return -1;
1da177e4
LT
256}
257
35b649fe
TH
258/**
259 * ata_tf_read_block - Read block address from ATA taskfile
260 * @tf: ATA taskfile of interest
261 * @dev: ATA device @tf belongs to
262 *
263 * LOCKING:
264 * None.
265 *
266 * Read block address from @tf. This function can handle all
267 * three address formats - LBA, LBA48 and CHS. tf->protocol and
268 * flags select the address format to use.
269 *
270 * RETURNS:
271 * Block address read from @tf.
272 */
273u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
274{
275 u64 block = 0;
276
277 if (tf->flags & ATA_TFLAG_LBA) {
278 if (tf->flags & ATA_TFLAG_LBA48) {
279 block |= (u64)tf->hob_lbah << 40;
280 block |= (u64)tf->hob_lbam << 32;
281 block |= tf->hob_lbal << 24;
282 } else
283 block |= (tf->device & 0xf) << 24;
284
285 block |= tf->lbah << 16;
286 block |= tf->lbam << 8;
287 block |= tf->lbal;
288 } else {
289 u32 cyl, head, sect;
290
291 cyl = tf->lbam | (tf->lbah << 8);
292 head = tf->device & 0xf;
293 sect = tf->lbal;
294
295 block = (cyl * dev->heads + head) * dev->sectors + sect;
296 }
297
298 return block;
299}
300
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Chooses NCQ,
 *	LBA48, LBA28 or CHS addressing depending on device flags and
 *	whether the request fits the address format.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* NCQ is never used for the internal (EH) tag */
	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* for FPDMA the tag goes in nsect bits 7:3 and the
		 * sector count moves to the feature registers */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
423
cb95d562
TH
424/**
425 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
426 * @pio_mask: pio_mask
427 * @mwdma_mask: mwdma_mask
428 * @udma_mask: udma_mask
429 *
430 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
431 * unsigned int xfer_mask.
432 *
433 * LOCKING:
434 * None.
435 *
436 * RETURNS:
437 * Packed xfer_mask.
438 */
439static unsigned int ata_pack_xfermask(unsigned int pio_mask,
440 unsigned int mwdma_mask,
441 unsigned int udma_mask)
442{
443 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
444 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
445 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
446}
447
c0489e4e
TH
448/**
449 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
450 * @xfer_mask: xfer_mask to unpack
451 * @pio_mask: resulting pio_mask
452 * @mwdma_mask: resulting mwdma_mask
453 * @udma_mask: resulting udma_mask
454 *
455 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
456 * Any NULL distination masks will be ignored.
457 */
458static void ata_unpack_xfermask(unsigned int xfer_mask,
459 unsigned int *pio_mask,
460 unsigned int *mwdma_mask,
461 unsigned int *udma_mask)
462{
463 if (pio_mask)
464 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
465 if (mwdma_mask)
466 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
467 if (udma_mask)
468 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
469}
470
/*
 * Maps a contiguous range of xfer_mask bits (@bits bits starting at
 * @shift) to the XFER_* mode number of the lowest bit (@base).  The
 * table is terminated by a negative @shift sentinel; the lookup
 * helpers below iterate until they hit it.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
480
481/**
482 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
483 * @xfer_mask: xfer_mask of interest
484 *
485 * Return matching XFER_* value for @xfer_mask. Only the highest
486 * bit of @xfer_mask is considered.
487 *
488 * LOCKING:
489 * None.
490 *
491 * RETURNS:
492 * Matching XFER_* value, 0 if no match found.
493 */
494static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
495{
496 int highbit = fls(xfer_mask) - 1;
497 const struct ata_xfer_ent *ent;
498
499 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
500 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
501 return ent->base + highbit - ent->shift;
502 return 0;
503}
504
505/**
506 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
507 * @xfer_mode: XFER_* of interest
508 *
509 * Return matching xfer_mask for @xfer_mode.
510 *
511 * LOCKING:
512 * None.
513 *
514 * RETURNS:
515 * Matching xfer_mask, 0 if no match found.
516 */
517static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
518{
519 const struct ata_xfer_ent *ent;
520
521 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
522 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
523 return 1 << (ent->shift + xfer_mode - ent->base);
524 return 0;
525}
526
527/**
528 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
529 * @xfer_mode: XFER_* of interest
530 *
531 * Return matching xfer_shift for @xfer_mode.
532 *
533 * LOCKING:
534 * None.
535 *
536 * RETURNS:
537 * Matching xfer_shift, -1 if no match found.
538 */
539static int ata_xfer_mode2shift(unsigned int xfer_mode)
540{
541 const struct ata_xfer_ent *ent;
542
543 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
544 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
545 return ent->shift;
546 return -1;
547}
548
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int bit = fls(xfer_mask) - 1;

	/* empty mask or a bit beyond the table yields "<n/a>" */
	if (bit < 0 || bit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[bit];
}
594
/* Human-readable name for a SATA link speed number (1-based). */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	/* spd is 1-based; 0 and out-of-range values are unknown */
	if (spd == 0 || spd > ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
606
3373efd8 607void ata_dev_disable(struct ata_device *dev)
0b8efb0a 608{
09d7f9b0 609 if (ata_dev_enabled(dev)) {
9af5c9c9 610 if (ata_msg_drv(dev->link->ap))
09d7f9b0 611 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
612 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
613 ATA_DNXFER_QUIET);
0b8efb0a
TH
614 dev->class++;
615 }
616}
617
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write the test pattern three times, alternating the values,
	 * so a stale latched value cannot accidentally match; the last
	 * write leaves 0x55/0xaa in the shadow registers */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	/* a present device echoes the final pattern back */
	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
660
1da177e4
LT
661/**
662 * ata_dev_classify - determine device type based on ATA-spec signature
663 * @tf: ATA taskfile register set for device to be identified
664 *
665 * Determine from taskfile register contents whether a device is
666 * ATA or ATAPI, as per "Signature and persistence" section
667 * of ATA/PI spec (volume 1, sect 5.14).
668 *
669 * LOCKING:
670 * None.
671 *
672 * RETURNS:
673 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
674 * the event of failure.
675 */
676
057ace5e 677unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
678{
679 /* Apple's open source Darwin code hints that some devices only
680 * put a proper signature into the LBA mid/high registers,
681 * So, we only check those. It's sufficient for uniqueness.
682 */
683
684 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
685 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
686 DPRINTK("found ATA device by sig\n");
687 return ATA_DEV_ATA;
688 }
689
690 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
691 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
692 DPRINTK("found ATAPI device by sig\n");
693 return ATA_DEV_ATAPI;
694 }
695
696 DPRINTK("unknown device\n");
697 return ATA_DEV_UNKNOWN;
698}
699
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	/* read the shadow registers; the feature field holds the
	 * diagnostic error code after reset */
	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* NOTE(review): 0x81 presumably means "slave failed
		 * diagnostics" -- master itself is fine; verify against
		 * the ATA diagnostic code table */
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		/* valid ATA signature but all-zero status: treat as absent */
		class = ATA_DEV_NONE;

	return class;
}
768
769/**
6a62a04d 770 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
771 * @id: IDENTIFY DEVICE results we will examine
772 * @s: string into which data is output
773 * @ofs: offset into identify device page
774 * @len: length of string to return. must be an even number.
775 *
776 * The strings in the IDENTIFY DEVICE page are broken up into
777 * 16-bit chunks. Run through the string, and output each
778 * 8-bit chunk linearly, regardless of platform.
779 *
780 * LOCKING:
781 * caller.
782 */
783
6a62a04d
TH
784void ata_id_string(const u16 *id, unsigned char *s,
785 unsigned int ofs, unsigned int len)
1da177e4
LT
786{
787 unsigned int c;
788
789 while (len > 0) {
790 c = id[ofs] >> 8;
791 *s = c;
792 s++;
793
794 c = id[ofs] & 0xff;
795 *s = c;
796 s++;
797
798 ofs++;
799 len -= 2;
800 }
801}
802
0e949ff3 803/**
6a62a04d 804 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
805 * @id: IDENTIFY DEVICE results we will examine
806 * @s: string into which data is output
807 * @ofs: offset into identify device page
808 * @len: length of string to return. must be an odd number.
809 *
6a62a04d 810 * This function is identical to ata_id_string except that it
0e949ff3
TH
811 * trims trailing spaces and terminates the resulting string with
812 * null. @len must be actual maximum length (even number) + 1.
813 *
814 * LOCKING:
815 * caller.
816 */
6a62a04d
TH
817void ata_id_c_string(const u16 *id, unsigned char *s,
818 unsigned int ofs, unsigned int len)
0e949ff3
TH
819{
820 unsigned char *p;
821
822 WARN_ON(!(len & 1));
823
6a62a04d 824 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
825
826 p = s + strnlen(s, len - 1);
827 while (p > s && p[-1] == ' ')
828 p--;
829 *p = '\0';
830}
0baab86b 831
db6f8759
TH
832static u64 ata_id_n_sectors(const u16 *id)
833{
834 if (ata_id_has_lba(id)) {
835 if (ata_id_has_lba48(id))
836 return ata_id_u64(id, 100);
837 else
838 return ata_id_u32(id, 60);
839 } else {
840 if (ata_id_current_chs_valid(id))
841 return ata_id_u32(id, 57);
842 else
843 return id[1] * id[3] * id[6];
844 }
845}
846
1e999736
AC
847static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
848{
849 u64 sectors = 0;
850
851 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
852 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
853 sectors |= (tf->hob_lbal & 0xff) << 24;
854 sectors |= (tf->lbah & 0xff) << 16;
855 sectors |= (tf->lbam & 0xff) << 8;
856 sectors |= (tf->lbal & 0xff);
857
858 return ++sectors;
859}
860
861static u64 ata_tf_to_lba(struct ata_taskfile *tf)
862{
863 u64 sectors = 0;
864
865 sectors |= (tf->device & 0x0f) << 24;
866 sectors |= (tf->lbah & 0xff) << 16;
867 sectors |= (tf->lbam & 0xff) << 8;
868 sectors |= (tf->lbal & 0xff);
869
870 return ++sectors;
871}
872
873/**
c728a914
TH
874 * ata_read_native_max_address - Read native max address
875 * @dev: target device
876 * @max_sectors: out parameter for the result native max address
1e999736 877 *
c728a914
TH
878 * Perform an LBA48 or LBA28 native size query upon the device in
879 * question.
1e999736 880 *
c728a914
TH
881 * RETURNS:
882 * 0 on success, -EACCES if command is aborted by the drive.
883 * -EIO on other errors.
1e999736 884 */
c728a914 885static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 886{
c728a914 887 unsigned int err_mask;
1e999736 888 struct ata_taskfile tf;
c728a914 889 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
890
891 ata_tf_init(dev, &tf);
892
c728a914 893 /* always clear all address registers */
1e999736 894 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 895
c728a914
TH
896 if (lba48) {
897 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
898 tf.flags |= ATA_TFLAG_LBA48;
899 } else
900 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 901
1e999736 902 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
903 tf.device |= ATA_LBA;
904
905 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
906 if (err_mask) {
907 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
908 "max address (err_mask=0x%x)\n", err_mask);
909 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
910 return -EACCES;
911 return -EIO;
912 }
1e999736 913
c728a914
TH
914 if (lba48)
915 *max_sectors = ata_tf_to_lba48(&tf);
916 else
917 *max_sectors = ata_tf_to_lba(&tf);
1e999736 918
c728a914 919 return 0;
1e999736
AC
920}
921
922/**
c728a914
TH
923 * ata_set_max_sectors - Set max sectors
924 * @dev: target device
6b38d1d1 925 * @new_sectors: new max sectors value to set for the device
1e999736 926 *
c728a914
TH
927 * Set max sectors of @dev to @new_sectors.
928 *
929 * RETURNS:
930 * 0 on success, -EACCES if command is aborted or denied (due to
931 * previous non-volatile SET_MAX) by the drive. -EIO on other
932 * errors.
1e999736 933 */
05027adc 934static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 935{
c728a914 936 unsigned int err_mask;
1e999736 937 struct ata_taskfile tf;
c728a914 938 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
939
940 new_sectors--;
941
942 ata_tf_init(dev, &tf);
943
1e999736 944 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
945
946 if (lba48) {
947 tf.command = ATA_CMD_SET_MAX_EXT;
948 tf.flags |= ATA_TFLAG_LBA48;
949
950 tf.hob_lbal = (new_sectors >> 24) & 0xff;
951 tf.hob_lbam = (new_sectors >> 32) & 0xff;
952 tf.hob_lbah = (new_sectors >> 40) & 0xff;
953 } else
954 tf.command = ATA_CMD_SET_MAX;
955
1e999736 956 tf.protocol |= ATA_PROT_NODATA;
c728a914 957 tf.device |= ATA_LBA;
1e999736
AC
958
959 tf.lbal = (new_sectors >> 0) & 0xff;
960 tf.lbam = (new_sectors >> 8) & 0xff;
961 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 962
c728a914
TH
963 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
964 if (err_mask) {
965 ata_dev_printk(dev, KERN_WARNING, "failed to set "
966 "max address (err_mask=0x%x)\n", err_mask);
967 if (err_mask == AC_ERR_DEV &&
968 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
969 return -EACCES;
970 return -EIO;
971 }
972
c728a914 973 return 0;
1e999736
AC
974}
975
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it?  Only ATA disks with LBA, an enabled
	 * HPA and no prior BROKEN_HPA horkage are handled. */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  Either no hidden area, or unlocking is not
	 * requested via the ignore_hpa module parameter -- just report. */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so dev->id reflects the new size */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1071
10305f0f
AC
1072/**
1073 * ata_id_to_dma_mode - Identify DMA mode from id block
1074 * @dev: device to identify
cc261267 1075 * @unknown: mode to assume if we cannot tell
10305f0f
AC
1076 *
1077 * Set up the timing values for the device based upon the identify
1078 * reported values for the DMA mode. This function is used by drivers
1079 * which rely upon firmware configured modes, but wish to report the
1080 * mode correctly when possible.
1081 *
1082 * In addition we emit similarly formatted messages to the default
1083 * ata_dev_set_mode handler, in order to provide consistency of
1084 * presentation.
1085 */
1086
1087void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1088{
1089 unsigned int mask;
1090 u8 mode;
1091
1092 /* Pack the DMA modes */
1093 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1094 if (dev->id[53] & 0x04)
1095 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1096
1097 /* Select the mode in use */
1098 mode = ata_xfer_mask2mode(mask);
1099
1100 if (mode != 0) {
1101 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1102 ata_mode_string(mask));
1103 } else {
1104 /* SWDMA perhaps ? */
1105 mode = unknown;
1106 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1107 }
1108
1109 /* Configure the device reporting */
1110 dev->xfer_mode = mode;
1111 dev->xfer_shift = ata_xfer_mode2shift(mode);
1112}
1113
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* Intentionally empty: controllers that need no explicit device
	 * selection plug this in as their dev_select() hook. */
}
1129
0baab86b 1130
1da177e4
LT
1131/**
1132 * ata_std_dev_select - Select device 0/1 on ATA bus
1133 * @ap: ATA channel to manipulate
1134 * @device: ATA device (numbered from zero) to select
1135 *
1136 * Use the method defined in the ATA specification to
1137 * make either device 0, or device 1, active on the
0baab86b
EF
1138 * ATA channel. Works with both PIO and MMIO.
1139 *
1140 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1141 *
1142 * LOCKING:
1143 * caller.
1144 */
1145
1146void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1147{
1148 u8 tmp;
1149
1150 if (device == 0)
1151 tmp = ATA_DEVICE_OBS;
1152 else
1153 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1154
0d5ff566 1155 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1156 ata_pause(ap); /* needed; also flushes, for mmio */
1157}
1158
1159/**
1160 * ata_dev_select - Select device 0/1 on ATA bus
1161 * @ap: ATA channel to manipulate
1162 * @device: ATA device (numbered from zero) to select
1163 * @wait: non-zero to wait for Status register BSY bit to clear
1164 * @can_sleep: non-zero if context allows sleeping
1165 *
1166 * Use the method defined in the ATA specification to
1167 * make either device 0, or device 1, active on the
1168 * ATA channel.
1169 *
1170 * This is a high-level version of ata_std_dev_select(),
1171 * which additionally provides the services of inserting
1172 * the proper pauses and status polling, where needed.
1173 *
1174 * LOCKING:
1175 * caller.
1176 */
1177
1178void ata_dev_select(struct ata_port *ap, unsigned int device,
1179 unsigned int wait, unsigned int can_sleep)
1180{
88574551 1181 if (ata_msg_probe(ap))
44877b4e
TH
1182 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1183 "device %u, wait %u\n", device, wait);
1da177e4
LT
1184
1185 if (wait)
1186 ata_wait_idle(ap);
1187
1188 ap->ops->dev_select(ap, device);
1189
1190 if (wait) {
9af5c9c9 1191 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1192 msleep(150);
1193 ata_wait_idle(ap);
1194 }
1195}
1196
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* Words dumped: 49 (capabilities), 53 (field validity),
	 * 63/64 (MWDMA/PIO modes), 75 (queue depth), 80-84 (version
	 * and command-set support), 88 (UDMA modes), 93 (hardware
	 * reset result).  Debug-build only via DPRINTK. */
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1235
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* Word 64 low bits advertise PIO3/PIO4; shift them above
		 * the always-assumed PIO0-2 (the 0x7 merged in below). */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	/* Word 63 low bits: MWDMA modes 0-2 */
	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes: word 163 packs
		 * advanced PIO (bits 0-2) and advanced DMA (bits 3-5)
		 * levels, mapping to PIO5/PIO6 and MWDMA3/MWDMA4.
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	/* Word 88 (UDMA) is only meaningful when word 53 bit 2 is set */
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1304
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	/* Rebind the per-port delayed work to @fn; @data is passed out
	 * of band via port_task_data. */
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1333
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* Cancel any pending invocation and wait for a running one to
	 * finish, so the guarantee above holds on return. */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1353
7102d230 1354static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1355{
77853bf2 1356 struct completion *waiting = qc->private_data;
a2a7a662 1357
a2a7a662 1358 complete(waiting);
a2a7a662
TH
1359}
1360
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the internal tag must be free here; anything else is a bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save and clear the link/port command-tracking state so the
	 * internal command runs alone; restored before returning. */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		/* total byte count across the sg list feeds qc->nbytes */
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		/* timed out: decide between freeze and direct completion */
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a more specific cause is set */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the command-tracking state saved above */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1533
2432697b 1534/**
33480a0e 1535 * ata_exec_internal - execute libata internal command
2432697b
TH
1536 * @dev: Device to which the command is sent
1537 * @tf: Taskfile registers for the command and the result
1538 * @cdb: CDB for packet command
1539 * @dma_dir: Data tranfer direction of the command
1540 * @buf: Data buffer of the command
1541 * @buflen: Length of data buffer
1542 *
1543 * Wrapper around ata_exec_internal_sg() which takes simple
1544 * buffer instead of sg list.
1545 *
1546 * LOCKING:
1547 * None. Should be called with kernel context, might sleep.
1548 *
1549 * RETURNS:
1550 * Zero on success, AC_ERR_* mask on failure
1551 */
1552unsigned ata_exec_internal(struct ata_device *dev,
1553 struct ata_taskfile *tf, const u8 *cdb,
1554 int dma_dir, void *buf, unsigned int buflen)
1555{
33480a0e
TH
1556 struct scatterlist *psg = NULL, sg;
1557 unsigned int n_elem = 0;
2432697b 1558
33480a0e
TH
1559 if (dma_dir != DMA_NONE) {
1560 WARN_ON(!buf);
1561 sg_init_one(&sg, buf, buflen);
1562 psg = &sg;
1563 n_elem++;
1564 }
2432697b 1565
33480a0e 1566 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1567}
1568
977e6b9f
TH
1569/**
1570 * ata_do_simple_cmd - execute simple internal command
1571 * @dev: Device to which the command is sent
1572 * @cmd: Opcode to execute
1573 *
1574 * Execute a 'simple' command, that only consists of the opcode
1575 * 'cmd' itself, without filling any other registers
1576 *
1577 * LOCKING:
1578 * Kernel thread context (may sleep).
1579 *
1580 * RETURNS:
1581 * Zero on success, AC_ERR_* mask on failure
e58eb583 1582 */
77b08fb5 1583unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1584{
1585 struct ata_taskfile tf;
e58eb583
TH
1586
1587 ata_tf_init(dev, &tf);
1588
1589 tf.command = cmd;
1590 tf.flags |= ATA_TFLAG_DEVICE;
1591 tf.protocol = ATA_PROT_NODATA;
1592
977e6b9f 1593 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1594}
1595
1bc4ccff
AC
1596/**
1597 * ata_pio_need_iordy - check if iordy needed
1598 * @adev: ATA device
1599 *
1600 * Check if the current speed of the device requires IORDY. Used
1601 * by various controllers for chip configuration.
1602 */
a617c09f 1603
1bc4ccff
AC
1604unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1605{
432729f0
AC
1606 /* Controller doesn't support IORDY. Probably a pointless check
1607 as the caller should know this */
9af5c9c9 1608 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1609 return 0;
432729f0
AC
1610 /* PIO3 and higher it is mandatory */
1611 if (adev->pio_mode > XFER_PIO_2)
1612 return 1;
1613 /* We turn it on when possible */
1614 if (ata_id_has_iordy(adev->id))
1bc4ccff 1615 return 1;
432729f0
AC
1616 return 0;
1617}
2e9edbf8 1618
432729f0
AC
1619/**
1620 * ata_pio_mask_no_iordy - Return the non IORDY mask
1621 * @adev: ATA device
1622 *
1623 * Compute the highest mode possible if we are not using iordy. Return
1624 * -1 if no iordy mode is available.
1625 */
a617c09f 1626
432729f0
AC
1627static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1628{
1bc4ccff 1629 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1630 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1631 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1632 /* Is the speed faster than the drive allows non IORDY ? */
1633 if (pio) {
1634 /* This is cycle times not frequency - watch the logic! */
1635 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1636 return 3 << ATA_SHIFT_PIO;
1637 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1638 }
1639 }
432729f0 1640 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1641}
1642
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY flavor matching the (assumed) device class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* NOTE(review): 0x37c8/0x738c in word 2 are treated as
	 * "powered up in standby" indicators; presumably per the ATA
	 * specific-configuration word — confirm against the spec. */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * shoud never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1819
3373efd8 1820static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1821{
9af5c9c9
TH
1822 struct ata_port *ap = dev->link->ap;
1823 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1824}
1825
a6e6ce8e
TH
1826static void ata_dev_config_ncq(struct ata_device *dev,
1827 char *desc, size_t desc_sz)
1828{
9af5c9c9 1829 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1830 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1831
1832 if (!ata_id_has_ncq(dev->id)) {
1833 desc[0] = '\0';
1834 return;
1835 }
75683fe7 1836 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1837 snprintf(desc, desc_sz, "NCQ (not used)");
1838 return;
1839 }
a6e6ce8e 1840 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1841 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1842 dev->flags |= ATA_DFLAG_NCQ;
1843 }
1844
1845 if (hdepth >= ddepth)
1846 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1847 else
1848 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1849}
1850
49016aca 1851/**
ffeae418 1852 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1853 * @dev: Target device to configure
1854 *
1855 * Configure @dev according to @dev->id. Generic and low-level
1856 * driver specific fixups are also applied.
49016aca
TH
1857 *
1858 * LOCKING:
ffeae418
TH
1859 * Kernel thread context (may sleep)
1860 *
1861 * RETURNS:
1862 * 0 on success, -errno otherwise
49016aca 1863 */
efdaedc4 1864int ata_dev_configure(struct ata_device *dev)
49016aca 1865{
9af5c9c9
TH
1866 struct ata_port *ap = dev->link->ap;
1867 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 1868 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1869 const u16 *id = dev->id;
ff8854b2 1870 unsigned int xfer_mask;
b352e57d 1871 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1872 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1873 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1874 int rc;
49016aca 1875
0dd4b21f 1876 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1877 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1878 __FUNCTION__);
ffeae418 1879 return 0;
49016aca
TH
1880 }
1881
0dd4b21f 1882 if (ata_msg_probe(ap))
44877b4e 1883 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1884
75683fe7
TH
1885 /* set horkage */
1886 dev->horkage |= ata_dev_blacklisted(dev);
1887
6746544c
TH
1888 /* let ACPI work its magic */
1889 rc = ata_acpi_on_devcfg(dev);
1890 if (rc)
1891 return rc;
08573a86 1892
05027adc
TH
1893 /* massage HPA, do it early as it might change IDENTIFY data */
1894 rc = ata_hpa_resize(dev);
1895 if (rc)
1896 return rc;
1897
c39f5ebe 1898 /* print device capabilities */
0dd4b21f 1899 if (ata_msg_probe(ap))
88574551
TH
1900 ata_dev_printk(dev, KERN_DEBUG,
1901 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1902 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1903 __FUNCTION__,
f15a1daf
TH
1904 id[49], id[82], id[83], id[84],
1905 id[85], id[86], id[87], id[88]);
c39f5ebe 1906
208a9933 1907 /* initialize to-be-configured parameters */
ea1dd4e1 1908 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1909 dev->max_sectors = 0;
1910 dev->cdb_len = 0;
1911 dev->n_sectors = 0;
1912 dev->cylinders = 0;
1913 dev->heads = 0;
1914 dev->sectors = 0;
1915
1da177e4
LT
1916 /*
1917 * common ATA, ATAPI feature tests
1918 */
1919
ff8854b2 1920 /* find max transfer mode; for printk only */
1148c3a7 1921 xfer_mask = ata_id_xfermask(id);
1da177e4 1922
0dd4b21f
BP
1923 if (ata_msg_probe(ap))
1924 ata_dump_id(id);
1da177e4 1925
ef143d57
AL
1926 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1927 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1928 sizeof(fwrevbuf));
1929
1930 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1931 sizeof(modelbuf));
1932
1da177e4
LT
1933 /* ATA-specific feature tests */
1934 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1935 if (ata_id_is_cfa(id)) {
1936 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1937 ata_dev_printk(dev, KERN_WARNING,
1938 "supports DRM functions and may "
1939 "not be fully accessable.\n");
b352e57d
AC
1940 snprintf(revbuf, 7, "CFA");
1941 }
1942 else
1943 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1944
1148c3a7 1945 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1946
3f64f565
EM
1947 if (dev->id[59] & 0x100)
1948 dev->multi_count = dev->id[59] & 0xff;
1949
1148c3a7 1950 if (ata_id_has_lba(id)) {
4c2d721a 1951 const char *lba_desc;
a6e6ce8e 1952 char ncq_desc[20];
8bf62ece 1953
4c2d721a
TH
1954 lba_desc = "LBA";
1955 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1956 if (ata_id_has_lba48(id)) {
8bf62ece 1957 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1958 lba_desc = "LBA48";
6fc49adb
TH
1959
1960 if (dev->n_sectors >= (1UL << 28) &&
1961 ata_id_has_flush_ext(id))
1962 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1963 }
8bf62ece 1964
a6e6ce8e
TH
1965 /* config NCQ */
1966 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1967
8bf62ece 1968 /* print device info to dmesg */
3f64f565
EM
1969 if (ata_msg_drv(ap) && print_info) {
1970 ata_dev_printk(dev, KERN_INFO,
1971 "%s: %s, %s, max %s\n",
1972 revbuf, modelbuf, fwrevbuf,
1973 ata_mode_string(xfer_mask));
1974 ata_dev_printk(dev, KERN_INFO,
1975 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 1976 (unsigned long long)dev->n_sectors,
3f64f565
EM
1977 dev->multi_count, lba_desc, ncq_desc);
1978 }
ffeae418 1979 } else {
8bf62ece
AL
1980 /* CHS */
1981
1982 /* Default translation */
1148c3a7
TH
1983 dev->cylinders = id[1];
1984 dev->heads = id[3];
1985 dev->sectors = id[6];
8bf62ece 1986
1148c3a7 1987 if (ata_id_current_chs_valid(id)) {
8bf62ece 1988 /* Current CHS translation is valid. */
1148c3a7
TH
1989 dev->cylinders = id[54];
1990 dev->heads = id[55];
1991 dev->sectors = id[56];
8bf62ece
AL
1992 }
1993
1994 /* print device info to dmesg */
3f64f565 1995 if (ata_msg_drv(ap) && print_info) {
88574551 1996 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
1997 "%s: %s, %s, max %s\n",
1998 revbuf, modelbuf, fwrevbuf,
1999 ata_mode_string(xfer_mask));
a84471fe 2000 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2001 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2002 (unsigned long long)dev->n_sectors,
2003 dev->multi_count, dev->cylinders,
2004 dev->heads, dev->sectors);
2005 }
07f6f7d0
AL
2006 }
2007
6e7846e9 2008 dev->cdb_len = 16;
1da177e4
LT
2009 }
2010
2011 /* ATAPI-specific feature tests */
2c13b7ce 2012 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2013 const char *cdb_intr_string = "";
2014 const char *atapi_an_string = "";
08a556db 2015
1148c3a7 2016 rc = atapi_cdb_len(id);
1da177e4 2017 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2018 if (ata_msg_warn(ap))
88574551
TH
2019 ata_dev_printk(dev, KERN_WARNING,
2020 "unsupported CDB len\n");
ffeae418 2021 rc = -EINVAL;
1da177e4
LT
2022 goto err_out_nosup;
2023 }
6e7846e9 2024 dev->cdb_len = (unsigned int) rc;
1da177e4 2025
9f45cbd3
KCA
2026 /*
2027 * check to see if this ATAPI device supports
2028 * Asynchronous Notification
2029 */
854c73a2
TH
2030 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id)) {
2031 unsigned int err_mask;
2032
9f45cbd3 2033 /* issue SET feature command to turn this on */
854c73a2
TH
2034 err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2035 if (err_mask)
9f45cbd3 2036 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2037 "failed to enable ATAPI AN "
2038 "(err_mask=0x%x)\n", err_mask);
2039 else {
9f45cbd3 2040 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2041 atapi_an_string = ", ATAPI AN";
2042 }
9f45cbd3
KCA
2043 }
2044
08a556db 2045 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2046 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2047 cdb_intr_string = ", CDB intr";
2048 }
312f7da2 2049
1da177e4 2050 /* print device info to dmesg */
5afc8142 2051 if (ata_msg_drv(ap) && print_info)
ef143d57 2052 ata_dev_printk(dev, KERN_INFO,
854c73a2 2053 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2054 modelbuf, fwrevbuf,
12436c30 2055 ata_mode_string(xfer_mask),
854c73a2 2056 cdb_intr_string, atapi_an_string);
1da177e4
LT
2057 }
2058
914ed354
TH
2059 /* determine max_sectors */
2060 dev->max_sectors = ATA_MAX_SECTORS;
2061 if (dev->flags & ATA_DFLAG_LBA48)
2062 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2063
93590859
AC
2064 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2065 /* Let the user know. We don't want to disallow opens for
2066 rescue purposes, or in case the vendor is just a blithering
2067 idiot */
2068 if (print_info) {
2069 ata_dev_printk(dev, KERN_WARNING,
2070"Drive reports diagnostics failure. This may indicate a drive\n");
2071 ata_dev_printk(dev, KERN_WARNING,
2072"fault or invalid emulation. Contact drive vendor for information.\n");
2073 }
2074 }
2075
4b2f3ede 2076 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2077 if (ata_dev_knobble(dev)) {
5afc8142 2078 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2079 ata_dev_printk(dev, KERN_INFO,
2080 "applying bridge limits\n");
5a529139 2081 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2082 dev->max_sectors = ATA_MAX_SECTORS;
2083 }
2084
75683fe7 2085 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2086 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2087 dev->max_sectors);
18d6e9d5 2088
4b2f3ede 2089 if (ap->ops->dev_config)
cd0d3bbc 2090 ap->ops->dev_config(dev);
4b2f3ede 2091
0dd4b21f
BP
2092 if (ata_msg_probe(ap))
2093 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2094 __FUNCTION__, ata_chk_status(ap));
ffeae418 2095 return 0;
1da177e4
LT
2096
2097err_out_nosup:
0dd4b21f 2098 if (ata_msg_probe(ap))
88574551
TH
2099 ata_dev_printk(dev, KERN_DEBUG,
2100 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2101 return rc;
1da177e4
LT
2102}
2103
be0d18df 2104/**
2e41e8e6 2105 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2106 * @ap: port
2107 *
2e41e8e6 2108 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2109 * detection.
2110 */
2111
2112int ata_cable_40wire(struct ata_port *ap)
2113{
2114 return ATA_CBL_PATA40;
2115}
2116
2117/**
2e41e8e6 2118 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2119 * @ap: port
2120 *
2e41e8e6 2121 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2122 * detection.
2123 */
2124
2125int ata_cable_80wire(struct ata_port *ap)
2126{
2127 return ATA_CBL_PATA80;
2128}
2129
2130/**
2131 * ata_cable_unknown - return unknown PATA cable.
2132 * @ap: port
2133 *
2134 * Helper method for drivers which have no PATA cable detection.
2135 */
2136
2137int ata_cable_unknown(struct ata_port *ap)
2138{
2139 return ATA_CBL_PATA_UNK;
2140}
2141
2142/**
2143 * ata_cable_sata - return SATA cable type
2144 * @ap: port
2145 *
2146 * Helper method for drivers which have SATA cables
2147 */
2148
2149int ata_cable_sata(struct ata_port *ap)
2150{
2151 return ATA_CBL_SATA;
2152}
2153
1da177e4
LT
2154/**
2155 * ata_bus_probe - Reset and probe ATA bus
2156 * @ap: Bus to probe
2157 *
0cba632b
JG
2158 * Master ATA bus probing function. Initiates a hardware-dependent
2159 * bus reset, then attempts to identify any devices found on
2160 * the bus.
2161 *
1da177e4 2162 * LOCKING:
0cba632b 2163 * PCI/etc. bus probe sem.
1da177e4
LT
2164 *
2165 * RETURNS:
96072e69 2166 * Zero on success, negative errno otherwise.
1da177e4
LT
2167 */
2168
80289167 2169int ata_bus_probe(struct ata_port *ap)
1da177e4 2170{
28ca5c57 2171 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2172 int tries[ATA_MAX_DEVICES];
f58229f8 2173 int rc;
e82cbdb9 2174 struct ata_device *dev;
1da177e4 2175
28ca5c57 2176 ata_port_probe(ap);
c19ba8af 2177
f58229f8
TH
2178 ata_link_for_each_dev(dev, &ap->link)
2179 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2180
2181 retry:
2044470c 2182 /* reset and determine device classes */
52783c5d 2183 ap->ops->phy_reset(ap);
2061a47a 2184
f58229f8 2185 ata_link_for_each_dev(dev, &ap->link) {
52783c5d
TH
2186 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2187 dev->class != ATA_DEV_UNKNOWN)
2188 classes[dev->devno] = dev->class;
2189 else
2190 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2191
52783c5d 2192 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2193 }
1da177e4 2194
52783c5d 2195 ata_port_probe(ap);
2044470c 2196
b6079ca4
AC
2197 /* after the reset the device state is PIO 0 and the controller
2198 state is undefined. Record the mode */
2199
f58229f8
TH
2200 ata_link_for_each_dev(dev, &ap->link)
2201 dev->pio_mode = XFER_PIO_0;
b6079ca4 2202
f31f0cc2
JG
2203 /* read IDENTIFY page and configure devices. We have to do the identify
2204 specific sequence bass-ackwards so that PDIAG- is released by
2205 the slave device */
2206
f58229f8
TH
2207 ata_link_for_each_dev(dev, &ap->link) {
2208 if (tries[dev->devno])
2209 dev->class = classes[dev->devno];
ffeae418 2210
14d2bac1 2211 if (!ata_dev_enabled(dev))
ffeae418 2212 continue;
ffeae418 2213
bff04647
TH
2214 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2215 dev->id);
14d2bac1
TH
2216 if (rc)
2217 goto fail;
f31f0cc2
JG
2218 }
2219
be0d18df
AC
2220 /* Now ask for the cable type as PDIAG- should have been released */
2221 if (ap->ops->cable_detect)
2222 ap->cbl = ap->ops->cable_detect(ap);
2223
614fe29b
AC
2224 /* We may have SATA bridge glue hiding here irrespective of the
2225 reported cable types and sensed types */
2226 ata_link_for_each_dev(dev, &ap->link) {
2227 if (!ata_dev_enabled(dev))
2228 continue;
2229 /* SATA drives indicate we have a bridge. We don't know which
2230 end of the link the bridge is which is a problem */
2231 if (ata_id_is_sata(dev->id))
2232 ap->cbl = ATA_CBL_SATA;
2233 }
2234
f31f0cc2
JG
2235 /* After the identify sequence we can now set up the devices. We do
2236 this in the normal order so that the user doesn't get confused */
2237
f58229f8 2238 ata_link_for_each_dev(dev, &ap->link) {
f31f0cc2
JG
2239 if (!ata_dev_enabled(dev))
2240 continue;
14d2bac1 2241
9af5c9c9 2242 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2243 rc = ata_dev_configure(dev);
9af5c9c9 2244 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2245 if (rc)
2246 goto fail;
1da177e4
LT
2247 }
2248
e82cbdb9 2249 /* configure transfer mode */
0260731f 2250 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2251 if (rc)
51713d35 2252 goto fail;
1da177e4 2253
f58229f8
TH
2254 ata_link_for_each_dev(dev, &ap->link)
2255 if (ata_dev_enabled(dev))
e82cbdb9 2256 return 0;
1da177e4 2257
e82cbdb9
TH
2258 /* no device present, disable port */
2259 ata_port_disable(ap);
96072e69 2260 return -ENODEV;
14d2bac1
TH
2261
2262 fail:
4ae72a1e
TH
2263 tries[dev->devno]--;
2264
14d2bac1
TH
2265 switch (rc) {
2266 case -EINVAL:
4ae72a1e 2267 /* eeek, something went very wrong, give up */
14d2bac1
TH
2268 tries[dev->devno] = 0;
2269 break;
4ae72a1e
TH
2270
2271 case -ENODEV:
2272 /* give it just one more chance */
2273 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2274 case -EIO:
4ae72a1e
TH
2275 if (tries[dev->devno] == 1) {
2276 /* This is the last chance, better to slow
2277 * down than lose it.
2278 */
936fd732 2279 sata_down_spd_limit(&ap->link);
4ae72a1e
TH
2280 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2281 }
14d2bac1
TH
2282 }
2283
4ae72a1e 2284 if (!tries[dev->devno])
3373efd8 2285 ata_dev_disable(dev);
ec573755 2286
14d2bac1 2287 goto retry;
1da177e4
LT
2288}
2289
2290/**
0cba632b
JG
2291 * ata_port_probe - Mark port as enabled
2292 * @ap: Port for which we indicate enablement
1da177e4 2293 *
0cba632b
JG
2294 * Modify @ap data structure such that the system
2295 * thinks that the entire port is enabled.
2296 *
cca3974e 2297 * LOCKING: host lock, or some other form of
0cba632b 2298 * serialization.
1da177e4
LT
2299 */
2300
2301void ata_port_probe(struct ata_port *ap)
2302{
198e0fed 2303 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2304}
2305
3be680b7
TH
2306/**
2307 * sata_print_link_status - Print SATA link status
936fd732 2308 * @link: SATA link to printk link status about
3be680b7
TH
2309 *
2310 * This function prints link speed and status of a SATA link.
2311 *
2312 * LOCKING:
2313 * None.
2314 */
936fd732 2315void sata_print_link_status(struct ata_link *link)
3be680b7 2316{
6d5f9732 2317 u32 sstatus, scontrol, tmp;
3be680b7 2318
936fd732 2319 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2320 return;
936fd732 2321 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2322
936fd732 2323 if (ata_link_online(link)) {
3be680b7 2324 tmp = (sstatus >> 4) & 0xf;
936fd732 2325 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2326 "SATA link up %s (SStatus %X SControl %X)\n",
2327 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2328 } else {
936fd732 2329 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2330 "SATA link down (SStatus %X SControl %X)\n",
2331 sstatus, scontrol);
3be680b7
TH
2332 }
2333}
2334
1da177e4 2335/**
780a87f7
JG
2336 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2337 * @ap: SATA port associated with target SATA PHY.
1da177e4 2338 *
780a87f7
JG
2339 * This function issues commands to standard SATA Sxxx
2340 * PHY registers, to wake up the phy (and device), and
2341 * clear any reset condition.
1da177e4
LT
2342 *
2343 * LOCKING:
0cba632b 2344 * PCI/etc. bus probe sem.
1da177e4
LT
2345 *
2346 */
2347void __sata_phy_reset(struct ata_port *ap)
2348{
936fd732 2349 struct ata_link *link = &ap->link;
1da177e4 2350 unsigned long timeout = jiffies + (HZ * 5);
936fd732 2351 u32 sstatus;
1da177e4
LT
2352
2353 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 2354 /* issue phy wake/reset */
936fd732 2355 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
62ba2841
TH
2356 /* Couldn't find anything in SATA I/II specs, but
2357 * AHCI-1.1 10.4.2 says at least 1 ms. */
2358 mdelay(1);
1da177e4 2359 }
81952c54 2360 /* phy wake/clear reset */
936fd732 2361 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
1da177e4
LT
2362
2363 /* wait for phy to become ready, if necessary */
2364 do {
2365 msleep(200);
936fd732 2366 sata_scr_read(link, SCR_STATUS, &sstatus);
1da177e4
LT
2367 if ((sstatus & 0xf) != 1)
2368 break;
2369 } while (time_before(jiffies, timeout));
2370
3be680b7 2371 /* print link status */
936fd732 2372 sata_print_link_status(link);
656563e3 2373
3be680b7 2374 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 2375 if (!ata_link_offline(link))
1da177e4 2376 ata_port_probe(ap);
3be680b7 2377 else
1da177e4 2378 ata_port_disable(ap);
1da177e4 2379
198e0fed 2380 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2381 return;
2382
2383 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2384 ata_port_disable(ap);
2385 return;
2386 }
2387
2388 ap->cbl = ATA_CBL_SATA;
2389}
2390
2391/**
780a87f7
JG
2392 * sata_phy_reset - Reset SATA bus.
2393 * @ap: SATA port associated with target SATA PHY.
1da177e4 2394 *
780a87f7
JG
2395 * This function resets the SATA bus, and then probes
2396 * the bus for devices.
1da177e4
LT
2397 *
2398 * LOCKING:
0cba632b 2399 * PCI/etc. bus probe sem.
1da177e4
LT
2400 *
2401 */
2402void sata_phy_reset(struct ata_port *ap)
2403{
2404 __sata_phy_reset(ap);
198e0fed 2405 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2406 return;
2407 ata_bus_reset(ap);
2408}
2409
ebdfca6e
AC
2410/**
2411 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2412 * @adev: device
2413 *
2414 * Obtain the other device on the same cable, or if none is
2415 * present NULL is returned
2416 */
2e9edbf8 2417
3373efd8 2418struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2419{
9af5c9c9
TH
2420 struct ata_link *link = adev->link;
2421 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2422 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2423 return NULL;
2424 return pair;
2425}
2426
1da177e4 2427/**
780a87f7
JG
2428 * ata_port_disable - Disable port.
2429 * @ap: Port to be disabled.
1da177e4 2430 *
780a87f7
JG
2431 * Modify @ap data structure such that the system
2432 * thinks that the entire port is disabled, and should
2433 * never attempt to probe or communicate with devices
2434 * on this port.
2435 *
cca3974e 2436 * LOCKING: host lock, or some other form of
780a87f7 2437 * serialization.
1da177e4
LT
2438 */
2439
2440void ata_port_disable(struct ata_port *ap)
2441{
9af5c9c9
TH
2442 ap->link.device[0].class = ATA_DEV_NONE;
2443 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2444 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2445}
2446
1c3fae4d 2447/**
3c567b7d 2448 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2449 * @link: Link to adjust SATA spd limit for
1c3fae4d 2450 *
936fd732 2451 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2452 * function only adjusts the limit. The change must be applied
3c567b7d 2453 * using sata_set_spd().
1c3fae4d
TH
2454 *
2455 * LOCKING:
2456 * Inherited from caller.
2457 *
2458 * RETURNS:
2459 * 0 on success, negative errno on failure
2460 */
936fd732 2461int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2462{
81952c54
TH
2463 u32 sstatus, spd, mask;
2464 int rc, highbit;
1c3fae4d 2465
936fd732 2466 if (!sata_scr_valid(link))
008a7896
TH
2467 return -EOPNOTSUPP;
2468
2469 /* If SCR can be read, use it to determine the current SPD.
936fd732 2470 * If not, use cached value in link->sata_spd.
008a7896 2471 */
936fd732 2472 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2473 if (rc == 0)
2474 spd = (sstatus >> 4) & 0xf;
2475 else
936fd732 2476 spd = link->sata_spd;
1c3fae4d 2477
936fd732 2478 mask = link->sata_spd_limit;
1c3fae4d
TH
2479 if (mask <= 1)
2480 return -EINVAL;
008a7896
TH
2481
2482 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2483 highbit = fls(mask) - 1;
2484 mask &= ~(1 << highbit);
2485
008a7896
TH
2486 /* Mask off all speeds higher than or equal to the current
2487 * one. Force 1.5Gbps if current SPD is not available.
2488 */
2489 if (spd > 1)
2490 mask &= (1 << (spd - 1)) - 1;
2491 else
2492 mask &= 1;
2493
2494 /* were we already at the bottom? */
1c3fae4d
TH
2495 if (!mask)
2496 return -EINVAL;
2497
936fd732 2498 link->sata_spd_limit = mask;
1c3fae4d 2499
936fd732 2500 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2501 sata_spd_string(fls(mask)));
1c3fae4d
TH
2502
2503 return 0;
2504}
2505
936fd732 2506static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2507{
2508 u32 spd, limit;
2509
936fd732 2510 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2511 limit = 0;
2512 else
936fd732 2513 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2514
2515 spd = (*scontrol >> 4) & 0xf;
2516 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2517
2518 return spd != limit;
2519}
2520
2521/**
3c567b7d 2522 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2523 * @link: Link in question
1c3fae4d
TH
2524 *
2525 * Test whether the spd limit in SControl matches
936fd732 2526 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2527 * whether hardreset is necessary to apply SATA spd
2528 * configuration.
2529 *
2530 * LOCKING:
2531 * Inherited from caller.
2532 *
2533 * RETURNS:
2534 * 1 if SATA spd configuration is needed, 0 otherwise.
2535 */
936fd732 2536int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2537{
2538 u32 scontrol;
2539
936fd732 2540 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2541 return 0;
2542
936fd732 2543 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2544}
2545
2546/**
3c567b7d 2547 * sata_set_spd - set SATA spd according to spd limit
936fd732 2548 * @link: Link to set SATA spd for
1c3fae4d 2549 *
936fd732 2550 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2551 *
2552 * LOCKING:
2553 * Inherited from caller.
2554 *
2555 * RETURNS:
2556 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2557 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2558 */
936fd732 2559int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2560{
2561 u32 scontrol;
81952c54 2562 int rc;
1c3fae4d 2563
936fd732 2564 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2565 return rc;
1c3fae4d 2566
936fd732 2567 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2568 return 0;
2569
936fd732 2570 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2571 return rc;
2572
1c3fae4d
TH
2573 return 1;
2574}
2575
452503f9
AC
2576/*
2577 * This mode timing computation functionality is ported over from
2578 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2579 */
2580/*
b352e57d 2581 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2582 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2583 * for UDMA6, which is currently supported only by Maxtor drives.
2584 *
2585 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2586 */
2587
2588static const struct ata_timing ata_timing[] = {
2589
2590 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2591 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2592 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2593 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2594
b352e57d
AC
2595 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2596 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2597 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2598 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2599 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2600
2601/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2602
452503f9
AC
2603 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2604 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2605 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2606
452503f9
AC
2607 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2608 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2609 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2610
b352e57d
AC
2611 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2612 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2613 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2614 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2615
2616 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2617 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2618 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2619
2620/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2621
2622 { 0xFF }
2623};
2624
2625#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2626#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2627
2628static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2629{
2630 q->setup = EZ(t->setup * 1000, T);
2631 q->act8b = EZ(t->act8b * 1000, T);
2632 q->rec8b = EZ(t->rec8b * 1000, T);
2633 q->cyc8b = EZ(t->cyc8b * 1000, T);
2634 q->active = EZ(t->active * 1000, T);
2635 q->recover = EZ(t->recover * 1000, T);
2636 q->cycle = EZ(t->cycle * 1000, T);
2637 q->udma = EZ(t->udma * 1000, UT);
2638}
2639
2640void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2641 struct ata_timing *m, unsigned int what)
2642{
2643 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2644 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2645 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2646 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2647 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2648 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2649 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2650 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2651}
2652
2653static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2654{
2655 const struct ata_timing *t;
2656
2657 for (t = ata_timing; t->mode != speed; t++)
91190758 2658 if (t->mode == 0xFF)
452503f9 2659 return NULL;
2e9edbf8 2660 return t;
452503f9
AC
2661}
2662
2663int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2664 struct ata_timing *t, int T, int UT)
2665{
2666 const struct ata_timing *s;
2667 struct ata_timing p;
2668
2669 /*
2e9edbf8 2670 * Find the mode.
75b1f2f8 2671 */
452503f9
AC
2672
2673 if (!(s = ata_timing_find_mode(speed)))
2674 return -EINVAL;
2675
75b1f2f8
AL
2676 memcpy(t, s, sizeof(*s));
2677
452503f9
AC
2678 /*
2679 * If the drive is an EIDE drive, it can tell us it needs extended
2680 * PIO/MW_DMA cycle timing.
2681 */
2682
2683 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2684 memset(&p, 0, sizeof(p));
2685 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2686 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2687 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2688 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2689 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2690 }
2691 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2692 }
2693
2694 /*
2695 * Convert the timing to bus clock counts.
2696 */
2697
75b1f2f8 2698 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2699
2700 /*
c893a3ae
RD
2701 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2702 * S.M.A.R.T * and some other commands. We have to ensure that the
2703 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
2704 */
2705
fd3367af 2706 if (speed > XFER_PIO_6) {
452503f9
AC
2707 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2708 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2709 }
2710
2711 /*
c893a3ae 2712 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2713 */
2714
2715 if (t->act8b + t->rec8b < t->cyc8b) {
2716 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2717 t->rec8b = t->cyc8b - t->act8b;
2718 }
2719
2720 if (t->active + t->recover < t->cycle) {
2721 t->active += (t->cycle - (t->active + t->recover)) / 2;
2722 t->recover = t->cycle - t->active;
2723 }
a617c09f 2724
4f701d1e
AC
2725 /* In a few cases quantisation may produce enough errors to
2726 leave t->cycle too low for the sum of active and recovery
2727 if so we must correct this */
2728 if (t->active + t->recover > t->cycle)
2729 t->cycle = t->active + t->recover;
452503f9
AC
2730
2731 return 0;
2732}
2733
cf176e1a
TH
2734/**
2735 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2736 * @dev: Device to adjust xfer masks
458337db 2737 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2738 *
2739 * Adjust xfer masks of @dev downward. Note that this function
2740 * does not apply the change. Invoking ata_set_mode() afterwards
2741 * will apply the limit.
2742 *
2743 * LOCKING:
2744 * Inherited from caller.
2745 *
2746 * RETURNS:
2747 * 0 on success, negative errno on failure
2748 */
458337db 2749int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2750{
458337db
TH
2751 char buf[32];
2752 unsigned int orig_mask, xfer_mask;
2753 unsigned int pio_mask, mwdma_mask, udma_mask;
2754 int quiet, highbit;
cf176e1a 2755
458337db
TH
2756 quiet = !!(sel & ATA_DNXFER_QUIET);
2757 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2758
458337db
TH
2759 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2760 dev->mwdma_mask,
2761 dev->udma_mask);
2762 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2763
458337db
TH
2764 switch (sel) {
2765 case ATA_DNXFER_PIO:
2766 highbit = fls(pio_mask) - 1;
2767 pio_mask &= ~(1 << highbit);
2768 break;
2769
2770 case ATA_DNXFER_DMA:
2771 if (udma_mask) {
2772 highbit = fls(udma_mask) - 1;
2773 udma_mask &= ~(1 << highbit);
2774 if (!udma_mask)
2775 return -ENOENT;
2776 } else if (mwdma_mask) {
2777 highbit = fls(mwdma_mask) - 1;
2778 mwdma_mask &= ~(1 << highbit);
2779 if (!mwdma_mask)
2780 return -ENOENT;
2781 }
2782 break;
2783
2784 case ATA_DNXFER_40C:
2785 udma_mask &= ATA_UDMA_MASK_40C;
2786 break;
2787
2788 case ATA_DNXFER_FORCE_PIO0:
2789 pio_mask &= 1;
2790 case ATA_DNXFER_FORCE_PIO:
2791 mwdma_mask = 0;
2792 udma_mask = 0;
2793 break;
2794
458337db
TH
2795 default:
2796 BUG();
2797 }
2798
2799 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2800
2801 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2802 return -ENOENT;
2803
2804 if (!quiet) {
2805 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2806 snprintf(buf, sizeof(buf), "%s:%s",
2807 ata_mode_string(xfer_mask),
2808 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2809 else
2810 snprintf(buf, sizeof(buf), "%s",
2811 ata_mode_string(xfer_mask));
2812
2813 ata_dev_printk(dev, KERN_WARNING,
2814 "limiting speed to %s\n", buf);
2815 }
cf176e1a
TH
2816
2817 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2818 &dev->udma_mask);
2819
cf176e1a 2820 return 0;
cf176e1a
TH
2821}
2822
3373efd8 2823static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2824{
9af5c9c9 2825 struct ata_eh_context *ehc = &dev->link->eh_context;
83206a29
TH
2826 unsigned int err_mask;
2827 int rc;
1da177e4 2828
e8384607 2829 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2830 if (dev->xfer_shift == ATA_SHIFT_PIO)
2831 dev->flags |= ATA_DFLAG_PIO;
2832
3373efd8 2833 err_mask = ata_dev_set_xfermode(dev);
11750a40
AC
2834 /* Old CFA may refuse this command, which is just fine */
2835 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2836 err_mask &= ~AC_ERR_DEV;
0bc2a79a
AC
2837 /* Some very old devices and some bad newer ones fail any kind of
2838 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2839 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2840 dev->pio_mode <= XFER_PIO_2)
2841 err_mask &= ~AC_ERR_DEV;
83206a29 2842 if (err_mask) {
f15a1daf
TH
2843 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2844 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2845 return -EIO;
2846 }
1da177e4 2847
baa1e78a 2848 ehc->i.flags |= ATA_EHI_POST_SETMODE;
422c9daa 2849 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
baa1e78a 2850 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2851 if (rc)
83206a29 2852 return rc;
48a8a14f 2853
23e71c3d
TH
2854 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2855 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2856
f15a1daf
TH
2857 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2858 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2859 return 0;
1da177e4
LT
2860}
2861
1da177e4 2862/**
04351821 2863 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2864 * @link: link on which timings will be programmed
e82cbdb9 2865 * @r_failed_dev: out paramter for failed device
1da177e4 2866 *
04351821
AC
2867 * Standard implementation of the function used to tune and set
2868 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2869 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2870 * returned in @r_failed_dev.
780a87f7 2871 *
1da177e4 2872 * LOCKING:
0cba632b 2873 * PCI/etc. bus probe sem.
e82cbdb9
TH
2874 *
2875 * RETURNS:
2876 * 0 on success, negative errno otherwise
1da177e4 2877 */
04351821 2878
0260731f 2879int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 2880{
0260731f 2881 struct ata_port *ap = link->ap;
e8e0619f 2882 struct ata_device *dev;
f58229f8 2883 int rc = 0, used_dma = 0, found = 0;
3adcebb2 2884
a6d5a51c 2885 /* step 1: calculate xfer_mask */
f58229f8 2886 ata_link_for_each_dev(dev, link) {
acf356b1 2887 unsigned int pio_mask, dma_mask;
a6d5a51c 2888
e1211e3f 2889 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2890 continue;
2891
3373efd8 2892 ata_dev_xfermask(dev);
1da177e4 2893
acf356b1
TH
2894 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2895 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2896 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2897 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2898
4f65977d 2899 found = 1;
5444a6f4
AC
2900 if (dev->dma_mode)
2901 used_dma = 1;
a6d5a51c 2902 }
4f65977d 2903 if (!found)
e82cbdb9 2904 goto out;
a6d5a51c
TH
2905
2906 /* step 2: always set host PIO timings */
f58229f8 2907 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2908 if (!ata_dev_enabled(dev))
2909 continue;
2910
2911 if (!dev->pio_mode) {
f15a1daf 2912 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2913 rc = -EINVAL;
e82cbdb9 2914 goto out;
e8e0619f
TH
2915 }
2916
2917 dev->xfer_mode = dev->pio_mode;
2918 dev->xfer_shift = ATA_SHIFT_PIO;
2919 if (ap->ops->set_piomode)
2920 ap->ops->set_piomode(ap, dev);
2921 }
1da177e4 2922
a6d5a51c 2923 /* step 3: set host DMA timings */
f58229f8 2924 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2925 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2926 continue;
2927
2928 dev->xfer_mode = dev->dma_mode;
2929 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2930 if (ap->ops->set_dmamode)
2931 ap->ops->set_dmamode(ap, dev);
2932 }
1da177e4
LT
2933
2934 /* step 4: update devices' xfer mode */
f58229f8 2935 ata_link_for_each_dev(dev, link) {
18d90deb 2936 /* don't update suspended devices' xfer mode */
9666f400 2937 if (!ata_dev_enabled(dev))
83206a29
TH
2938 continue;
2939
3373efd8 2940 rc = ata_dev_set_mode(dev);
5bbc53f4 2941 if (rc)
e82cbdb9 2942 goto out;
83206a29 2943 }
1da177e4 2944
e8e0619f
TH
2945 /* Record simplex status. If we selected DMA then the other
2946 * host channels are not permitted to do so.
5444a6f4 2947 */
cca3974e 2948 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2949 ap->host->simplex_claimed = ap;
5444a6f4 2950
e82cbdb9
TH
2951 out:
2952 if (rc)
2953 *r_failed_dev = dev;
2954 return rc;
1da177e4
LT
2955}
2956
04351821
AC
2957/**
2958 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2959 * @link: link on which timings will be programmed
04351821
AC
2960 * @r_failed_dev: out paramter for failed device
2961 *
2962 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2963 * ata_set_mode() fails, pointer to the failing device is
2964 * returned in @r_failed_dev.
2965 *
2966 * LOCKING:
2967 * PCI/etc. bus probe sem.
2968 *
2969 * RETURNS:
2970 * 0 on success, negative errno otherwise
2971 */
0260731f 2972int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 2973{
0260731f
TH
2974 struct ata_port *ap = link->ap;
2975
04351821
AC
2976 /* has private set_mode? */
2977 if (ap->ops->set_mode)
0260731f
TH
2978 return ap->ops->set_mode(link, r_failed_dev);
2979 return ata_do_set_mode(link, r_failed_dev);
04351821
AC
2980}
2981
1fdffbce
JG
2982/**
2983 * ata_tf_to_host - issue ATA taskfile to host controller
2984 * @ap: port to which command is being issued
2985 * @tf: ATA taskfile register set
2986 *
2987 * Issues ATA taskfile register set to ATA host controller,
2988 * with proper synchronization with interrupt handler and
2989 * other threads.
2990 *
2991 * LOCKING:
cca3974e 2992 * spin_lock_irqsave(host lock)
1fdffbce
JG
2993 */
2994
2995static inline void ata_tf_to_host(struct ata_port *ap,
2996 const struct ata_taskfile *tf)
2997{
2998 ap->ops->tf_load(ap, tf);
2999 ap->ops->exec_command(ap, tf);
3000}
3001
1da177e4
LT
3002/**
3003 * ata_busy_sleep - sleep until BSY clears, or timeout
3004 * @ap: port containing status register to be polled
3005 * @tmout_pat: impatience timeout
3006 * @tmout: overall timeout
3007 *
780a87f7
JG
3008 * Sleep until ATA Status register bit BSY clears,
3009 * or a timeout occurs.
3010 *
d1adc1bb
TH
3011 * LOCKING:
3012 * Kernel thread context (may sleep).
3013 *
3014 * RETURNS:
3015 * 0 on success, -errno otherwise.
1da177e4 3016 */
d1adc1bb
TH
3017int ata_busy_sleep(struct ata_port *ap,
3018 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
3019{
3020 unsigned long timer_start, timeout;
3021 u8 status;
3022
3023 status = ata_busy_wait(ap, ATA_BUSY, 300);
3024 timer_start = jiffies;
3025 timeout = timer_start + tmout_pat;
d1adc1bb
TH
3026 while (status != 0xff && (status & ATA_BUSY) &&
3027 time_before(jiffies, timeout)) {
1da177e4
LT
3028 msleep(50);
3029 status = ata_busy_wait(ap, ATA_BUSY, 3);
3030 }
3031
d1adc1bb 3032 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 3033 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
3034 "port is slow to respond, please be patient "
3035 "(Status 0x%x)\n", status);
1da177e4
LT
3036
3037 timeout = timer_start + tmout;
d1adc1bb
TH
3038 while (status != 0xff && (status & ATA_BUSY) &&
3039 time_before(jiffies, timeout)) {
1da177e4
LT
3040 msleep(50);
3041 status = ata_chk_status(ap);
3042 }
3043
d1adc1bb
TH
3044 if (status == 0xff)
3045 return -ENODEV;
3046
1da177e4 3047 if (status & ATA_BUSY) {
f15a1daf 3048 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3049 "(%lu secs, Status 0x%x)\n",
3050 tmout / HZ, status);
d1adc1bb 3051 return -EBUSY;
1da177e4
LT
3052 }
3053
3054 return 0;
3055}
3056
d4b2bab4
TH
3057/**
3058 * ata_wait_ready - sleep until BSY clears, or timeout
3059 * @ap: port containing status register to be polled
3060 * @deadline: deadline jiffies for the operation
3061 *
3062 * Sleep until ATA Status register bit BSY clears, or timeout
3063 * occurs.
3064 *
3065 * LOCKING:
3066 * Kernel thread context (may sleep).
3067 *
3068 * RETURNS:
3069 * 0 on success, -errno otherwise.
3070 */
3071int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3072{
3073 unsigned long start = jiffies;
3074 int warned = 0;
3075
3076 while (1) {
3077 u8 status = ata_chk_status(ap);
3078 unsigned long now = jiffies;
3079
3080 if (!(status & ATA_BUSY))
3081 return 0;
936fd732 3082 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3083 return -ENODEV;
3084 if (time_after(now, deadline))
3085 return -EBUSY;
3086
3087 if (!warned && time_after(now, start + 5 * HZ) &&
3088 (deadline - now > 3 * HZ)) {
3089 ata_port_printk(ap, KERN_WARNING,
3090 "port is slow to respond, please be patient "
3091 "(Status 0x%x)\n", status);
3092 warned = 1;
3093 }
3094
3095 msleep(50);
3096 }
3097}
3098
3099static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3100 unsigned long deadline)
1da177e4
LT
3101{
3102 struct ata_ioports *ioaddr = &ap->ioaddr;
3103 unsigned int dev0 = devmask & (1 << 0);
3104 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3105 int rc, ret = 0;
1da177e4
LT
3106
3107 /* if device 0 was found in ata_devchk, wait for its
3108 * BSY bit to clear
3109 */
d4b2bab4
TH
3110 if (dev0) {
3111 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3112 if (rc) {
3113 if (rc != -ENODEV)
3114 return rc;
3115 ret = rc;
3116 }
d4b2bab4 3117 }
1da177e4 3118
e141d999
TH
3119 /* if device 1 was found in ata_devchk, wait for register
3120 * access briefly, then wait for BSY to clear.
1da177e4 3121 */
e141d999
TH
3122 if (dev1) {
3123 int i;
1da177e4
LT
3124
3125 ap->ops->dev_select(ap, 1);
e141d999
TH
3126
3127 /* Wait for register access. Some ATAPI devices fail
3128 * to set nsect/lbal after reset, so don't waste too
3129 * much time on it. We're gonna wait for !BSY anyway.
3130 */
3131 for (i = 0; i < 2; i++) {
3132 u8 nsect, lbal;
3133
3134 nsect = ioread8(ioaddr->nsect_addr);
3135 lbal = ioread8(ioaddr->lbal_addr);
3136 if ((nsect == 1) && (lbal == 1))
3137 break;
3138 msleep(50); /* give drive a breather */
3139 }
3140
d4b2bab4 3141 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3142 if (rc) {
3143 if (rc != -ENODEV)
3144 return rc;
3145 ret = rc;
3146 }
d4b2bab4 3147 }
1da177e4
LT
3148
3149 /* is all this really necessary? */
3150 ap->ops->dev_select(ap, 0);
3151 if (dev1)
3152 ap->ops->dev_select(ap, 1);
3153 if (dev0)
3154 ap->ops->dev_select(ap, 0);
d4b2bab4 3155
9b89391c 3156 return ret;
1da177e4
LT
3157}
3158
d4b2bab4
TH
3159static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3160 unsigned long deadline)
1da177e4
LT
3161{
3162 struct ata_ioports *ioaddr = &ap->ioaddr;
3163
44877b4e 3164 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3165
3166 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3167 iowrite8(ap->ctl, ioaddr->ctl_addr);
3168 udelay(20); /* FIXME: flush */
3169 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3170 udelay(20); /* FIXME: flush */
3171 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3172
3173 /* spec mandates ">= 2ms" before checking status.
3174 * We wait 150ms, because that was the magic delay used for
3175 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3176 * between when the ATA command register is written, and then
3177 * status is checked. Because waiting for "a while" before
3178 * checking status is fine, post SRST, we perform this magic
3179 * delay here as well.
09c7ad79
AC
3180 *
3181 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
3182 */
3183 msleep(150);
3184
2e9edbf8 3185 /* Before we perform post reset processing we want to see if
298a41ca
TH
3186 * the bus shows 0xFF because the odd clown forgets the D7
3187 * pulldown resistor.
3188 */
d1adc1bb 3189 if (ata_check_status(ap) == 0xFF)
9b89391c 3190 return -ENODEV;
09c7ad79 3191
d4b2bab4 3192 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3193}
3194
3195/**
3196 * ata_bus_reset - reset host port and associated ATA channel
3197 * @ap: port to reset
3198 *
3199 * This is typically the first time we actually start issuing
3200 * commands to the ATA channel. We wait for BSY to clear, then
3201 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3202 * result. Determine what devices, if any, are on the channel
3203 * by looking at the device 0/1 error register. Look at the signature
3204 * stored in each device's taskfile registers, to determine if
3205 * the device is ATA or ATAPI.
3206 *
3207 * LOCKING:
0cba632b 3208 * PCI/etc. bus probe sem.
cca3974e 3209 * Obtains host lock.
1da177e4
LT
3210 *
3211 * SIDE EFFECTS:
198e0fed 3212 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3213 */
3214
3215void ata_bus_reset(struct ata_port *ap)
3216{
9af5c9c9 3217 struct ata_device *device = ap->link.device;
1da177e4
LT
3218 struct ata_ioports *ioaddr = &ap->ioaddr;
3219 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3220 u8 err;
aec5c3c1 3221 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3222 int rc;
1da177e4 3223
44877b4e 3224 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3225
3226 /* determine if device 0/1 are present */
3227 if (ap->flags & ATA_FLAG_SATA_RESET)
3228 dev0 = 1;
3229 else {
3230 dev0 = ata_devchk(ap, 0);
3231 if (slave_possible)
3232 dev1 = ata_devchk(ap, 1);
3233 }
3234
3235 if (dev0)
3236 devmask |= (1 << 0);
3237 if (dev1)
3238 devmask |= (1 << 1);
3239
3240 /* select device 0 again */
3241 ap->ops->dev_select(ap, 0);
3242
3243 /* issue bus reset */
9b89391c
TH
3244 if (ap->flags & ATA_FLAG_SRST) {
3245 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3246 if (rc && rc != -ENODEV)
aec5c3c1 3247 goto err_out;
9b89391c 3248 }
1da177e4
LT
3249
3250 /*
3251 * determine by signature whether we have ATA or ATAPI devices
3252 */
3f19859e 3253 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
1da177e4 3254 if ((slave_possible) && (err != 0x81))
3f19859e 3255 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
1da177e4 3256
1da177e4 3257 /* is double-select really necessary? */
9af5c9c9 3258 if (device[1].class != ATA_DEV_NONE)
1da177e4 3259 ap->ops->dev_select(ap, 1);
9af5c9c9 3260 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3261 ap->ops->dev_select(ap, 0);
3262
3263 /* if no devices were detected, disable this port */
9af5c9c9
TH
3264 if ((device[0].class == ATA_DEV_NONE) &&
3265 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3266 goto err_out;
3267
3268 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3269 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3270 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3271 }
3272
3273 DPRINTK("EXIT\n");
3274 return;
3275
3276err_out:
f15a1daf 3277 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3278 ata_port_disable(ap);
1da177e4
LT
3279
3280 DPRINTK("EXIT\n");
3281}
3282
d7bb4cc7 3283/**
936fd732
TH
3284 * sata_link_debounce - debounce SATA phy status
3285 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3286 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3287 * @deadline: deadline jiffies for the operation
d7bb4cc7 3288 *
936fd732 3289* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3290 * holding the same value where DET is not 1 for @duration polled
3291 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3292 * beginning of the stable state. Because DET gets stuck at 1 on
3293 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3294 * until timeout then returns 0 if DET is stable at 1.
3295 *
d4b2bab4
TH
3296 * @timeout is further limited by @deadline. The sooner of the
3297 * two is used.
3298 *
d7bb4cc7
TH
3299 * LOCKING:
3300 * Kernel thread context (may sleep)
3301 *
3302 * RETURNS:
3303 * 0 on success, -errno on failure.
3304 */
936fd732
TH
3305int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3306 unsigned long deadline)
7a7921e8 3307{
d7bb4cc7 3308 unsigned long interval_msec = params[0];
d4b2bab4
TH
3309 unsigned long duration = msecs_to_jiffies(params[1]);
3310 unsigned long last_jiffies, t;
d7bb4cc7
TH
3311 u32 last, cur;
3312 int rc;
3313
d4b2bab4
TH
3314 t = jiffies + msecs_to_jiffies(params[2]);
3315 if (time_before(t, deadline))
3316 deadline = t;
3317
936fd732 3318 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3319 return rc;
3320 cur &= 0xf;
3321
3322 last = cur;
3323 last_jiffies = jiffies;
3324
3325 while (1) {
3326 msleep(interval_msec);
936fd732 3327 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3328 return rc;
3329 cur &= 0xf;
3330
3331 /* DET stable? */
3332 if (cur == last) {
d4b2bab4 3333 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3334 continue;
3335 if (time_after(jiffies, last_jiffies + duration))
3336 return 0;
3337 continue;
3338 }
3339
3340 /* unstable, start over */
3341 last = cur;
3342 last_jiffies = jiffies;
3343
f1545154
TH
3344 /* Check deadline. If debouncing failed, return
3345 * -EPIPE to tell upper layer to lower link speed.
3346 */
d4b2bab4 3347 if (time_after(jiffies, deadline))
f1545154 3348 return -EPIPE;
d7bb4cc7
TH
3349 }
3350}
3351
3352/**
936fd732
TH
3353 * sata_link_resume - resume SATA link
3354 * @link: ATA link to resume SATA
d7bb4cc7 3355 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3356 * @deadline: deadline jiffies for the operation
d7bb4cc7 3357 *
936fd732 3358 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3359 *
3360 * LOCKING:
3361 * Kernel thread context (may sleep)
3362 *
3363 * RETURNS:
3364 * 0 on success, -errno on failure.
3365 */
936fd732
TH
3366int sata_link_resume(struct ata_link *link, const unsigned long *params,
3367 unsigned long deadline)
d7bb4cc7
TH
3368{
3369 u32 scontrol;
81952c54
TH
3370 int rc;
3371
936fd732 3372 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3373 return rc;
7a7921e8 3374
852ee16a 3375 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3376
936fd732 3377 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3378 return rc;
7a7921e8 3379
d7bb4cc7
TH
3380 /* Some PHYs react badly if SStatus is pounded immediately
3381 * after resuming. Delay 200ms before debouncing.
3382 */
3383 msleep(200);
7a7921e8 3384
936fd732 3385 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3386}
3387
f5914a46
TH
3388/**
3389 * ata_std_prereset - prepare for reset
cc0680a5 3390 * @link: ATA link to be reset
d4b2bab4 3391 * @deadline: deadline jiffies for the operation
f5914a46 3392 *
cc0680a5 3393 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3394 * prereset makes libata abort whole reset sequence and give up
3395 * that port, so prereset should be best-effort. It does its
3396 * best to prepare for reset sequence but if things go wrong, it
3397 * should just whine, not fail.
f5914a46
TH
3398 *
3399 * LOCKING:
3400 * Kernel thread context (may sleep)
3401 *
3402 * RETURNS:
3403 * 0 on success, -errno otherwise.
3404 */
cc0680a5 3405int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3406{
cc0680a5 3407 struct ata_port *ap = link->ap;
936fd732 3408 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3409 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3410 int rc;
3411
31daabda 3412 /* handle link resume */
28324304 3413 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
0c88758b 3414 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
28324304
TH
3415 ehc->i.action |= ATA_EH_HARDRESET;
3416
f5914a46
TH
3417 /* if we're about to do hardreset, nothing more to do */
3418 if (ehc->i.action & ATA_EH_HARDRESET)
3419 return 0;
3420
936fd732 3421 /* if SATA, resume link */
a16abc0b 3422 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3423 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3424 /* whine about phy resume failure but proceed */
3425 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3426 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3427 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3428 }
3429
3430 /* Wait for !BSY if the controller can wait for the first D2H
3431 * Reg FIS and we don't know that no device is attached.
3432 */
0c88758b 3433 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
b8cffc6a 3434 rc = ata_wait_ready(ap, deadline);
6dffaf61 3435 if (rc && rc != -ENODEV) {
cc0680a5 3436 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
3437 "(errno=%d), forcing hardreset\n", rc);
3438 ehc->i.action |= ATA_EH_HARDRESET;
3439 }
3440 }
f5914a46
TH
3441
3442 return 0;
3443}
3444
c2bd5804
TH
3445/**
3446 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 3447 * @link: ATA link to reset
c2bd5804 3448 * @classes: resulting classes of attached devices
d4b2bab4 3449 * @deadline: deadline jiffies for the operation
c2bd5804 3450 *
52783c5d 3451 * Reset host port using ATA SRST.
c2bd5804
TH
3452 *
3453 * LOCKING:
3454 * Kernel thread context (may sleep)
3455 *
3456 * RETURNS:
3457 * 0 on success, -errno otherwise.
3458 */
cc0680a5 3459int ata_std_softreset(struct ata_link *link, unsigned int *classes,
d4b2bab4 3460 unsigned long deadline)
c2bd5804 3461{
cc0680a5 3462 struct ata_port *ap = link->ap;
c2bd5804 3463 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3464 unsigned int devmask = 0;
3465 int rc;
c2bd5804
TH
3466 u8 err;
3467
3468 DPRINTK("ENTER\n");
3469
936fd732 3470 if (ata_link_offline(link)) {
3a39746a
TH
3471 classes[0] = ATA_DEV_NONE;
3472 goto out;
3473 }
3474
c2bd5804
TH
3475 /* determine if device 0/1 are present */
3476 if (ata_devchk(ap, 0))
3477 devmask |= (1 << 0);
3478 if (slave_possible && ata_devchk(ap, 1))
3479 devmask |= (1 << 1);
3480
c2bd5804
TH
3481 /* select device 0 again */
3482 ap->ops->dev_select(ap, 0);
3483
3484 /* issue bus reset */
3485 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3486 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c 3487 /* if link is occupied, -ENODEV too is an error */
936fd732 3488 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
cc0680a5 3489 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
d4b2bab4 3490 return rc;
c2bd5804
TH
3491 }
3492
3493 /* determine by signature whether we have ATA or ATAPI devices */
3f19859e
TH
3494 classes[0] = ata_dev_try_classify(&link->device[0],
3495 devmask & (1 << 0), &err);
c2bd5804 3496 if (slave_possible && err != 0x81)
3f19859e
TH
3497 classes[1] = ata_dev_try_classify(&link->device[1],
3498 devmask & (1 << 1), &err);
c2bd5804 3499
3a39746a 3500 out:
c2bd5804
TH
3501 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3502 return 0;
3503}
3504
3505/**
cc0680a5
TH
3506 * sata_link_hardreset - reset link via SATA phy reset
3507 * @link: link to reset
b6103f6d 3508 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3509 * @deadline: deadline jiffies for the operation
c2bd5804 3510 *
cc0680a5 3511 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
3512 *
3513 * LOCKING:
3514 * Kernel thread context (may sleep)
3515 *
3516 * RETURNS:
3517 * 0 on success, -errno otherwise.
3518 */
cc0680a5 3519int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 3520 unsigned long deadline)
c2bd5804 3521{
852ee16a 3522 u32 scontrol;
81952c54 3523 int rc;
852ee16a 3524
c2bd5804
TH
3525 DPRINTK("ENTER\n");
3526
936fd732 3527 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3528 /* SATA spec says nothing about how to reconfigure
3529 * spd. To be on the safe side, turn off phy during
3530 * reconfiguration. This works for at least ICH7 AHCI
3531 * and Sil3124.
3532 */
936fd732 3533 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3534 goto out;
81952c54 3535
a34b6fc0 3536 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3537
936fd732 3538 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3539 goto out;
1c3fae4d 3540
936fd732 3541 sata_set_spd(link);
1c3fae4d
TH
3542 }
3543
3544 /* issue phy wake/reset */
936fd732 3545 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3546 goto out;
81952c54 3547
852ee16a 3548 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3549
936fd732 3550 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3551 goto out;
c2bd5804 3552
1c3fae4d 3553 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3554 * 10.4.2 says at least 1 ms.
3555 */
3556 msleep(1);
3557
936fd732
TH
3558 /* bring link back */
3559 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
3560 out:
3561 DPRINTK("EXIT, rc=%d\n", rc);
3562 return rc;
3563}
3564
3565/**
3566 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 3567 * @link: link to reset
b6103f6d 3568 * @class: resulting class of attached device
d4b2bab4 3569 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3570 *
3571 * SATA phy-reset host port using DET bits of SControl register,
3572 * wait for !BSY and classify the attached device.
3573 *
3574 * LOCKING:
3575 * Kernel thread context (may sleep)
3576 *
3577 * RETURNS:
3578 * 0 on success, -errno otherwise.
3579 */
cc0680a5 3580int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 3581 unsigned long deadline)
b6103f6d 3582{
cc0680a5 3583 struct ata_port *ap = link->ap;
936fd732 3584 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
3585 int rc;
3586
3587 DPRINTK("ENTER\n");
3588
3589 /* do hardreset */
cc0680a5 3590 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 3591 if (rc) {
cc0680a5 3592 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
3593 "COMRESET failed (errno=%d)\n", rc);
3594 return rc;
3595 }
c2bd5804 3596
c2bd5804 3597 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 3598 if (ata_link_offline(link)) {
c2bd5804
TH
3599 *class = ATA_DEV_NONE;
3600 DPRINTK("EXIT, link offline\n");
3601 return 0;
3602 }
3603
34fee227
TH
3604 /* wait a while before checking status, see SRST for more info */
3605 msleep(150);
3606
d4b2bab4 3607 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3608 /* link occupied, -ENODEV too is an error */
3609 if (rc) {
cc0680a5 3610 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
3611 "COMRESET failed (errno=%d)\n", rc);
3612 return rc;
c2bd5804
TH
3613 }
3614
3a39746a
TH
3615 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3616
3f19859e 3617 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
3618
3619 DPRINTK("EXIT, class=%u\n", *class);
3620 return 0;
3621}
3622
3623/**
3624 * ata_std_postreset - standard postreset callback
cc0680a5 3625 * @link: the target ata_link
c2bd5804
TH
3626 * @classes: classes of attached devices
3627 *
3628 * This function is invoked after a successful reset. Note that
3629 * the device might have been reset more than once using
3630 * different reset methods before postreset is invoked.
c2bd5804 3631 *
c2bd5804
TH
3632 * LOCKING:
3633 * Kernel thread context (may sleep)
3634 */
cc0680a5 3635void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3636{
cc0680a5 3637 struct ata_port *ap = link->ap;
dc2b3515
TH
3638 u32 serror;
3639
c2bd5804
TH
3640 DPRINTK("ENTER\n");
3641
c2bd5804 3642 /* print link status */
936fd732 3643 sata_print_link_status(link);
c2bd5804 3644
dc2b3515 3645 /* clear SError */
936fd732
TH
3646 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3647 sata_scr_write(link, SCR_ERROR, serror);
dc2b3515 3648
c2bd5804
TH
3649 /* is double-select really necessary? */
3650 if (classes[0] != ATA_DEV_NONE)
3651 ap->ops->dev_select(ap, 1);
3652 if (classes[1] != ATA_DEV_NONE)
3653 ap->ops->dev_select(ap, 0);
3654
3a39746a
TH
3655 /* bail out if no device is present */
3656 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3657 DPRINTK("EXIT, no device\n");
3658 return;
3659 }
3660
3661 /* set up device control */
0d5ff566
TH
3662 if (ap->ioaddr.ctl_addr)
3663 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3664
3665 DPRINTK("EXIT\n");
3666}
3667
623a3128
TH
3668/**
3669 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3670 * @dev: device to compare against
3671 * @new_class: class of the new device
3672 * @new_id: IDENTIFY page of the new device
3673 *
3674 * Compare @new_class and @new_id against @dev and determine
3675 * whether @dev is the device indicated by @new_class and
3676 * @new_id.
3677 *
3678 * LOCKING:
3679 * None.
3680 *
3681 * RETURNS:
3682 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3683 */
3373efd8
TH
3684static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3685 const u16 *new_id)
623a3128
TH
3686{
3687 const u16 *old_id = dev->id;
a0cf733b
TH
3688 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3689 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3690
3691 if (dev->class != new_class) {
f15a1daf
TH
3692 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3693 dev->class, new_class);
623a3128
TH
3694 return 0;
3695 }
3696
a0cf733b
TH
3697 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3698 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3699 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3700 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3701
3702 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3703 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3704 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3705 return 0;
3706 }
3707
3708 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3709 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3710 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3711 return 0;
3712 }
3713
623a3128
TH
3714 return 1;
3715}
3716
3717/**
fe30911b 3718 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3719 * @dev: target ATA device
bff04647 3720 * @readid_flags: read ID flags
623a3128
TH
3721 *
3722 * Re-read IDENTIFY page and make sure @dev is still attached to
3723 * the port.
3724 *
3725 * LOCKING:
3726 * Kernel thread context (may sleep)
3727 *
3728 * RETURNS:
3729 * 0 on success, negative errno otherwise
3730 */
fe30911b 3731int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3732{
5eb45c02 3733 unsigned int class = dev->class;
9af5c9c9 3734 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3735 int rc;
3736
fe635c7e 3737 /* read ID data */
bff04647 3738 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3739 if (rc)
fe30911b 3740 return rc;
623a3128
TH
3741
3742 /* is the device still there? */
fe30911b
TH
3743 if (!ata_dev_same_device(dev, class, id))
3744 return -ENODEV;
623a3128 3745
fe635c7e 3746 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3747 return 0;
3748}
3749
3750/**
3751 * ata_dev_revalidate - Revalidate ATA device
3752 * @dev: device to revalidate
422c9daa 3753 * @new_class: new class code
fe30911b
TH
3754 * @readid_flags: read ID flags
3755 *
3756 * Re-read IDENTIFY page, make sure @dev is still attached to the
3757 * port and reconfigure it according to the new IDENTIFY page.
3758 *
3759 * LOCKING:
3760 * Kernel thread context (may sleep)
3761 *
3762 * RETURNS:
3763 * 0 on success, negative errno otherwise
3764 */
422c9daa
TH
3765int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3766 unsigned int readid_flags)
fe30911b 3767{
6ddcd3b0 3768 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3769 int rc;
3770
3771 if (!ata_dev_enabled(dev))
3772 return -ENODEV;
3773
422c9daa
TH
3774 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3775 if (ata_class_enabled(new_class) &&
3776 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3777 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3778 dev->class, new_class);
3779 rc = -ENODEV;
3780 goto fail;
3781 }
3782
fe30911b
TH
3783 /* re-read ID */
3784 rc = ata_dev_reread_id(dev, readid_flags);
3785 if (rc)
3786 goto fail;
623a3128
TH
3787
3788 /* configure device according to the new ID */
efdaedc4 3789 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3790 if (rc)
3791 goto fail;
3792
3793 /* verify n_sectors hasn't changed */
b54eebd6
TH
3794 if (dev->class == ATA_DEV_ATA && n_sectors &&
3795 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
3796 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3797 "%llu != %llu\n",
3798 (unsigned long long)n_sectors,
3799 (unsigned long long)dev->n_sectors);
8270bec4
TH
3800
3801 /* restore original n_sectors */
3802 dev->n_sectors = n_sectors;
3803
6ddcd3b0
TH
3804 rc = -ENODEV;
3805 goto fail;
3806 }
3807
3808 return 0;
623a3128
TH
3809
3810 fail:
f15a1daf 3811 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3812 return rc;
3813}
3814
6919a0a6
AC
3815struct ata_blacklist_entry {
3816 const char *model_num;
3817 const char *model_rev;
3818 unsigned long horkage;
3819};
3820
3821static const struct ata_blacklist_entry ata_device_blacklist [] = {
3822 /* Devices with DMA related problems under Linux */
3823 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3824 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3825 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3826 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3827 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3828 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3829 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3830 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3831 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3832 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3833 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3834 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3835 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3836 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3837 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3838 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3839 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3840 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3841 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3842 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3843 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3844 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3845 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3846 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3847 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3848 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3849 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3850 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3851 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
39f19886 3852 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
5acd50f6 3853 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
39ce7128
TH
3854 { "IOMEGA ZIP 250 ATAPI Floppy",
3855 NULL, ATA_HORKAGE_NODMA },
6919a0a6 3856
18d6e9d5 3857 /* Weird ATAPI devices */
40a1d531 3858 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 3859
6919a0a6
AC
3860 /* Devices we expect to fail diagnostics */
3861
3862 /* Devices where NCQ should be avoided */
3863 /* NCQ is slow */
3864 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3865 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3866 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3867 /* NCQ is broken */
539cc7c7 3868 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3869 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
2f8d90ab 3870 { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
539cc7c7
JG
3871 ATA_HORKAGE_NONCQ },
3872
36e337d0
RH
3873 /* Blacklist entries taken from Silicon Image 3124/3132
3874 Windows driver .inf file - also several Linux problem reports */
3875 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3876 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3877 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
bd9c5a39
TH
3878 /* Drives which do spurious command completion */
3879 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2f8fcebb 3880 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
e14cbfa6 3881 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
2f8fcebb 3882 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
a520f261 3883 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3fb6589c 3884 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
0e3dbc01 3885 { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, },
5d6aca8d 3886 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
6919a0a6 3887
16c55b03
TH
3888 /* devices which puke on READ_NATIVE_MAX */
3889 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3890 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3891 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3892 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6
AC
3893
3894 /* End Marker */
3895 { }
1da177e4 3896};
2e9edbf8 3897
539cc7c7
JG
/**
 *	strn_pattern_cmp - match a name against a pattern with an
 *	optional trailing wildcard
 *	@patt: pattern, may end with @wildchar
 *	@name: string to test
 *	@wildchar: wildcard character (callers pass '*')
 *
 *	If @patt ends with @wildchar, only the prefix before the
 *	wildcard must match @name.  Otherwise @patt and @name must
 *	match exactly.
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strcmp()-style).
 */
int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;

	/*
	 * check for trailing wildcard: *\0
	 */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		return strncmp(patt, name, p - patt);

	/* No wildcard: require an exact match.  The previous code
	 * compared only strlen(name) characters, which made a pattern
	 * falsely match any name that is a strict prefix of it.
	 */
	return strcmp(patt, name);
}
3914
75683fe7 3915static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 3916{
8bfa79fc
TH
3917 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3918 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3919 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3920
8bfa79fc
TH
3921 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3922 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3923
6919a0a6 3924 while (ad->model_num) {
539cc7c7 3925 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
3926 if (ad->model_rev == NULL)
3927 return ad->horkage;
539cc7c7 3928 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 3929 return ad->horkage;
f4b15fef 3930 }
6919a0a6 3931 ad++;
f4b15fef 3932 }
1da177e4
LT
3933 return 0;
3934}
3935
6919a0a6
AC
3936static int ata_dma_blacklisted(const struct ata_device *dev)
3937{
3938 /* We don't support polling DMA.
3939 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3940 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3941 */
9af5c9c9 3942 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
3943 (dev->flags & ATA_DFLAG_CDB_INTR))
3944 return 1;
75683fe7 3945 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
3946}
3947
a6d5a51c
TH
3948/**
3949 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3950 * @dev: Device to compute xfermask for
3951 *
acf356b1
TH
3952 * Compute supported xfermask of @dev and store it in
3953 * dev->*_mask. This function is responsible for applying all
3954 * known limits including host controller limits, device
3955 * blacklist, etc...
a6d5a51c
TH
3956 *
3957 * LOCKING:
3958 * None.
a6d5a51c 3959 */
3373efd8 3960static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3961{
9af5c9c9
TH
3962 struct ata_link *link = dev->link;
3963 struct ata_port *ap = link->ap;
cca3974e 3964 struct ata_host *host = ap->host;
a6d5a51c 3965 unsigned long xfer_mask;
1da177e4 3966
37deecb5 3967 /* controller modes available */
565083e1
TH
3968 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3969 ap->mwdma_mask, ap->udma_mask);
3970
8343f889 3971 /* drive modes available */
37deecb5
TH
3972 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3973 dev->mwdma_mask, dev->udma_mask);
3974 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3975
b352e57d
AC
3976 /*
3977 * CFA Advanced TrueIDE timings are not allowed on a shared
3978 * cable
3979 */
3980 if (ata_dev_pair(dev)) {
3981 /* No PIO5 or PIO6 */
3982 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3983 /* No MWDMA3 or MWDMA 4 */
3984 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3985 }
3986
37deecb5
TH
3987 if (ata_dma_blacklisted(dev)) {
3988 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3989 ata_dev_printk(dev, KERN_WARNING,
3990 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3991 }
a6d5a51c 3992
14d66ab7
PV
3993 if ((host->flags & ATA_HOST_SIMPLEX) &&
3994 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3995 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3996 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3997 "other device, disabling DMA\n");
5444a6f4 3998 }
565083e1 3999
e424675f
JG
4000 if (ap->flags & ATA_FLAG_NO_IORDY)
4001 xfer_mask &= ata_pio_mask_no_iordy(dev);
4002
5444a6f4 4003 if (ap->ops->mode_filter)
a76b62ca 4004 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4005
8343f889
RH
4006 /* Apply cable rule here. Don't apply it early because when
4007 * we handle hot plug the cable type can itself change.
4008 * Check this last so that we know if the transfer rate was
4009 * solely limited by the cable.
4010 * Unknown or 80 wire cables reported host side are checked
4011 * drive side as well. Cases where we know a 40wire cable
4012 * is used safely for 80 are not checked here.
4013 */
4014 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4015 /* UDMA/44 or higher would be available */
4016 if((ap->cbl == ATA_CBL_PATA40) ||
4017 (ata_drive_40wire(dev->id) &&
4018 (ap->cbl == ATA_CBL_PATA_UNK ||
4019 ap->cbl == ATA_CBL_PATA80))) {
4020 ata_dev_printk(dev, KERN_WARNING,
4021 "limited to UDMA/33 due to 40-wire cable\n");
4022 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4023 }
4024
565083e1
TH
4025 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4026 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4027}
4028
1da177e4
LT
4029/**
4030 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4031 * @dev: Device to which command will be sent
4032 *
780a87f7
JG
4033 * Issue SET FEATURES - XFER MODE command to device @dev
4034 * on port @ap.
4035 *
1da177e4 4036 * LOCKING:
0cba632b 4037 * PCI/etc. bus probe sem.
83206a29
TH
4038 *
4039 * RETURNS:
4040 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4041 */
4042
3373efd8 4043static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4044{
a0123703 4045 struct ata_taskfile tf;
83206a29 4046 unsigned int err_mask;
1da177e4
LT
4047
4048 /* set up set-features taskfile */
4049 DPRINTK("set features - xfer mode\n");
4050
464cf177
TH
4051 /* Some controllers and ATAPI devices show flaky interrupt
4052 * behavior after setting xfer mode. Use polling instead.
4053 */
3373efd8 4054 ata_tf_init(dev, &tf);
a0123703
TH
4055 tf.command = ATA_CMD_SET_FEATURES;
4056 tf.feature = SETFEATURES_XFER;
464cf177 4057 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4058 tf.protocol = ATA_PROT_NODATA;
4059 tf.nsect = dev->xfer_mode;
1da177e4 4060
3373efd8 4061 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9f45cbd3
KCA
4062
4063 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4064 return err_mask;
4065}
4066
4067/**
4068 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4069 * @dev: Device to which command will be sent
4070 * @enable: Whether to enable or disable the feature
4071 *
4072 * Issue SET FEATURES - SATA FEATURES command to device @dev
4073 * on port @ap with sector count set to indicate Asynchronous
4074 * Notification feature
4075 *
4076 * LOCKING:
4077 * PCI/etc. bus probe sem.
4078 *
4079 * RETURNS:
4080 * 0 on success, AC_ERR_* mask otherwise.
4081 */
4082static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4083{
4084 struct ata_taskfile tf;
4085 unsigned int err_mask;
4086
4087 /* set up set-features taskfile */
4088 DPRINTK("set features - SATA features\n");
4089
4090 ata_tf_init(dev, &tf);
4091 tf.command = ATA_CMD_SET_FEATURES;
4092 tf.feature = enable;
4093 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4094 tf.protocol = ATA_PROT_NODATA;
4095 tf.nsect = SATA_AN;
4096
4097 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 4098
83206a29
TH
4099 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4100 return err_mask;
1da177e4
LT
4101}
4102
8bf62ece
AL
4103/**
4104 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4105 * @dev: Device to which command will be sent
e2a7f77a
RD
4106 * @heads: Number of heads (taskfile parameter)
4107 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4108 *
4109 * LOCKING:
6aff8f1f
TH
4110 * Kernel thread context (may sleep)
4111 *
4112 * RETURNS:
4113 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4114 */
3373efd8
TH
4115static unsigned int ata_dev_init_params(struct ata_device *dev,
4116 u16 heads, u16 sectors)
8bf62ece 4117{
a0123703 4118 struct ata_taskfile tf;
6aff8f1f 4119 unsigned int err_mask;
8bf62ece
AL
4120
4121 /* Number of sectors per track 1-255. Number of heads 1-16 */
4122 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4123 return AC_ERR_INVALID;
8bf62ece
AL
4124
4125 /* set up init dev params taskfile */
4126 DPRINTK("init dev params \n");
4127
3373efd8 4128 ata_tf_init(dev, &tf);
a0123703
TH
4129 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4130 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4131 tf.protocol = ATA_PROT_NODATA;
4132 tf.nsect = sectors;
4133 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4134
3373efd8 4135 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
18b2466c
AC
4136 /* A clean abort indicates an original or just out of spec drive
4137 and we should continue as we issue the setup based on the
4138 drive reported working geometry */
4139 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4140 err_mask = 0;
8bf62ece 4141
6aff8f1f
TH
4142 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4143 return err_mask;
8bf62ece
AL
4144}
4145
1da177e4 4146/**
0cba632b
JG
4147 * ata_sg_clean - Unmap DMA memory associated with command
4148 * @qc: Command containing DMA memory to be released
4149 *
4150 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4151 *
4152 * LOCKING:
cca3974e 4153 * spin_lock_irqsave(host lock)
1da177e4 4154 */
70e6ad0c 4155void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4156{
4157 struct ata_port *ap = qc->ap;
cedc9a47 4158 struct scatterlist *sg = qc->__sg;
1da177e4 4159 int dir = qc->dma_dir;
cedc9a47 4160 void *pad_buf = NULL;
1da177e4 4161
a4631474
TH
4162 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4163 WARN_ON(sg == NULL);
1da177e4
LT
4164
4165 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4166 WARN_ON(qc->n_elem > 1);
1da177e4 4167
2c13b7ce 4168 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4169
cedc9a47
JG
4170 /* if we padded the buffer out to 32-bit bound, and data
4171 * xfer direction is from-device, we must copy from the
4172 * pad buffer back into the supplied buffer
4173 */
4174 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4175 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4176
4177 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4178 if (qc->n_elem)
2f1f610b 4179 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
4180 /* restore last sg */
4181 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4182 if (pad_buf) {
4183 struct scatterlist *psg = &qc->pad_sgent;
4184 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4185 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4186 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4187 }
4188 } else {
2e242fa9 4189 if (qc->n_elem)
2f1f610b 4190 dma_unmap_single(ap->dev,
e1410f2d
JG
4191 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4192 dir);
cedc9a47
JG
4193 /* restore sg */
4194 sg->length += qc->pad_len;
4195 if (pad_buf)
4196 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4197 pad_buf, qc->pad_len);
4198 }
1da177e4
LT
4199
4200 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4201 qc->__sg = NULL;
1da177e4
LT
4202}
4203
4204/**
4205 * ata_fill_sg - Fill PCI IDE PRD table
4206 * @qc: Metadata associated with taskfile to be transferred
4207 *
780a87f7
JG
4208 * Fill PCI IDE PRD (scatter-gather) table with segments
4209 * associated with the current disk command.
4210 *
1da177e4 4211 * LOCKING:
cca3974e 4212 * spin_lock_irqsave(host lock)
1da177e4
LT
4213 *
4214 */
4215static void ata_fill_sg(struct ata_queued_cmd *qc)
4216{
1da177e4 4217 struct ata_port *ap = qc->ap;
cedc9a47
JG
4218 struct scatterlist *sg;
4219 unsigned int idx;
1da177e4 4220
a4631474 4221 WARN_ON(qc->__sg == NULL);
f131883e 4222 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4223
4224 idx = 0;
cedc9a47 4225 ata_for_each_sg(sg, qc) {
1da177e4
LT
4226 u32 addr, offset;
4227 u32 sg_len, len;
4228
4229 /* determine if physical DMA addr spans 64K boundary.
4230 * Note h/w doesn't support 64-bit, so we unconditionally
4231 * truncate dma_addr_t to u32.
4232 */
4233 addr = (u32) sg_dma_address(sg);
4234 sg_len = sg_dma_len(sg);
4235
4236 while (sg_len) {
4237 offset = addr & 0xffff;
4238 len = sg_len;
4239 if ((offset + sg_len) > 0x10000)
4240 len = 0x10000 - offset;
4241
4242 ap->prd[idx].addr = cpu_to_le32(addr);
4243 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4244 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4245
4246 idx++;
4247 sg_len -= len;
4248 addr += len;
4249 }
4250 }
4251
4252 if (idx)
4253 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4254}
b9a4197e 4255
d26fc955
AC
4256/**
4257 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4258 * @qc: Metadata associated with taskfile to be transferred
4259 *
4260 * Fill PCI IDE PRD (scatter-gather) table with segments
4261 * associated with the current disk command. Perform the fill
4262 * so that we avoid writing any length 64K records for
4263 * controllers that don't follow the spec.
4264 *
4265 * LOCKING:
4266 * spin_lock_irqsave(host lock)
4267 *
4268 */
4269static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4270{
4271 struct ata_port *ap = qc->ap;
4272 struct scatterlist *sg;
4273 unsigned int idx;
4274
4275 WARN_ON(qc->__sg == NULL);
4276 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4277
4278 idx = 0;
4279 ata_for_each_sg(sg, qc) {
4280 u32 addr, offset;
4281 u32 sg_len, len, blen;
4282
4283 /* determine if physical DMA addr spans 64K boundary.
4284 * Note h/w doesn't support 64-bit, so we unconditionally
4285 * truncate dma_addr_t to u32.
4286 */
4287 addr = (u32) sg_dma_address(sg);
4288 sg_len = sg_dma_len(sg);
4289
4290 while (sg_len) {
4291 offset = addr & 0xffff;
4292 len = sg_len;
4293 if ((offset + sg_len) > 0x10000)
4294 len = 0x10000 - offset;
4295
4296 blen = len & 0xffff;
4297 ap->prd[idx].addr = cpu_to_le32(addr);
4298 if (blen == 0) {
4299 /* Some PATA chipsets like the CS5530 can't
4300 cope with 0x0000 meaning 64K as the spec says */
4301 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4302 blen = 0x8000;
4303 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4304 }
4305 ap->prd[idx].flags_len = cpu_to_le32(blen);
4306 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4307
4308 idx++;
4309 sg_len -= len;
4310 addr += len;
4311 }
4312 }
4313
4314 if (idx)
4315 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4316}
4317
1da177e4
LT
4318/**
4319 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4320 * @qc: Metadata associated with taskfile to check
4321 *
780a87f7
JG
4322 * Allow low-level driver to filter ATA PACKET commands, returning
4323 * a status indicating whether or not it is OK to use DMA for the
4324 * supplied PACKET command.
4325 *
1da177e4 4326 * LOCKING:
cca3974e 4327 * spin_lock_irqsave(host lock)
0cba632b 4328 *
1da177e4
LT
4329 * RETURNS: 0 when ATAPI DMA can be used
4330 * nonzero otherwise
4331 */
4332int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4333{
4334 struct ata_port *ap = qc->ap;
b9a4197e
TH
4335
4336 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4337 * few ATAPI devices choke on such DMA requests.
4338 */
4339 if (unlikely(qc->nbytes & 15))
4340 return 1;
6f23a31d 4341
1da177e4 4342 if (ap->ops->check_atapi_dma)
b9a4197e 4343 return ap->ops->check_atapi_dma(qc);
1da177e4 4344
b9a4197e 4345 return 0;
1da177e4 4346}
b9a4197e 4347
1da177e4
LT
4348/**
4349 * ata_qc_prep - Prepare taskfile for submission
4350 * @qc: Metadata associated with taskfile to be prepared
4351 *
780a87f7
JG
4352 * Prepare ATA taskfile for submission.
4353 *
1da177e4 4354 * LOCKING:
cca3974e 4355 * spin_lock_irqsave(host lock)
1da177e4
LT
4356 */
4357void ata_qc_prep(struct ata_queued_cmd *qc)
4358{
4359 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4360 return;
4361
4362 ata_fill_sg(qc);
4363}
4364
d26fc955
AC
4365/**
4366 * ata_dumb_qc_prep - Prepare taskfile for submission
4367 * @qc: Metadata associated with taskfile to be prepared
4368 *
4369 * Prepare ATA taskfile for submission.
4370 *
4371 * LOCKING:
4372 * spin_lock_irqsave(host lock)
4373 */
4374void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4375{
4376 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4377 return;
4378
4379 ata_fill_sg_dumb(qc);
4380}
4381
/* No-op ->qc_prep for controllers that need no PRD/setup work. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
}
4383
0cba632b
JG
4384/**
4385 * ata_sg_init_one - Associate command with memory buffer
4386 * @qc: Command to be associated
4387 * @buf: Memory buffer
4388 * @buflen: Length of memory buffer, in bytes.
4389 *
4390 * Initialize the data-related elements of queued_cmd @qc
4391 * to point to a single memory buffer, @buf of byte length @buflen.
4392 *
4393 * LOCKING:
cca3974e 4394 * spin_lock_irqsave(host lock)
0cba632b
JG
4395 */
4396
1da177e4
LT
4397void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4398{
1da177e4
LT
4399 qc->flags |= ATA_QCFLAG_SINGLE;
4400
cedc9a47 4401 qc->__sg = &qc->sgent;
1da177e4 4402 qc->n_elem = 1;
cedc9a47 4403 qc->orig_n_elem = 1;
1da177e4 4404 qc->buf_virt = buf;
233277ca 4405 qc->nbytes = buflen;
1da177e4 4406
61c0596c 4407 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4408}
4409
0cba632b
JG
4410/**
4411 * ata_sg_init - Associate command with scatter-gather table.
4412 * @qc: Command to be associated
4413 * @sg: Scatter-gather table.
4414 * @n_elem: Number of elements in s/g table.
4415 *
4416 * Initialize the data-related elements of queued_cmd @qc
4417 * to point to a scatter-gather table @sg, containing @n_elem
4418 * elements.
4419 *
4420 * LOCKING:
cca3974e 4421 * spin_lock_irqsave(host lock)
0cba632b
JG
4422 */
4423
1da177e4
LT
4424void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4425 unsigned int n_elem)
4426{
4427 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4428 qc->__sg = sg;
1da177e4 4429 qc->n_elem = n_elem;
cedc9a47 4430 qc->orig_n_elem = n_elem;
1da177e4
LT
4431}
4432
4433/**
0cba632b
JG
4434 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4435 * @qc: Command with memory buffer to be mapped.
4436 *
4437 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4438 *
4439 * LOCKING:
cca3974e 4440 * spin_lock_irqsave(host lock)
1da177e4
LT
4441 *
4442 * RETURNS:
0cba632b 4443 * Zero on success, negative on error.
1da177e4
LT
4444 */
4445
4446static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4447{
4448 struct ata_port *ap = qc->ap;
4449 int dir = qc->dma_dir;
cedc9a47 4450 struct scatterlist *sg = qc->__sg;
1da177e4 4451 dma_addr_t dma_address;
2e242fa9 4452 int trim_sg = 0;
1da177e4 4453
cedc9a47
JG
4454 /* we must lengthen transfers to end on a 32-bit boundary */
4455 qc->pad_len = sg->length & 3;
4456 if (qc->pad_len) {
4457 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4458 struct scatterlist *psg = &qc->pad_sgent;
4459
a4631474 4460 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4461
4462 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4463
4464 if (qc->tf.flags & ATA_TFLAG_WRITE)
4465 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4466 qc->pad_len);
4467
4468 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4469 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4470 /* trim sg */
4471 sg->length -= qc->pad_len;
2e242fa9
TH
4472 if (sg->length == 0)
4473 trim_sg = 1;
cedc9a47
JG
4474
4475 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4476 sg->length, qc->pad_len);
4477 }
4478
2e242fa9
TH
4479 if (trim_sg) {
4480 qc->n_elem--;
e1410f2d
JG
4481 goto skip_map;
4482 }
4483
2f1f610b 4484 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4485 sg->length, dir);
537a95d9
TH
4486 if (dma_mapping_error(dma_address)) {
4487 /* restore sg */
4488 sg->length += qc->pad_len;
1da177e4 4489 return -1;
537a95d9 4490 }
1da177e4
LT
4491
4492 sg_dma_address(sg) = dma_address;
32529e01 4493 sg_dma_len(sg) = sg->length;
1da177e4 4494
2e242fa9 4495skip_map:
1da177e4
LT
4496 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4497 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4498
4499 return 0;
4500}
4501
4502/**
0cba632b
JG
4503 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4504 * @qc: Command with scatter-gather table to be mapped.
4505 *
4506 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4507 *
4508 * LOCKING:
cca3974e 4509 * spin_lock_irqsave(host lock)
1da177e4
LT
4510 *
4511 * RETURNS:
0cba632b 4512 * Zero on success, negative on error.
1da177e4
LT
4513 *
4514 */
4515
4516static int ata_sg_setup(struct ata_queued_cmd *qc)
4517{
4518 struct ata_port *ap = qc->ap;
cedc9a47
JG
4519 struct scatterlist *sg = qc->__sg;
4520 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 4521 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4522
44877b4e 4523 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4524 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4525
cedc9a47
JG
4526 /* we must lengthen transfers to end on a 32-bit boundary */
4527 qc->pad_len = lsg->length & 3;
4528 if (qc->pad_len) {
4529 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4530 struct scatterlist *psg = &qc->pad_sgent;
4531 unsigned int offset;
4532
a4631474 4533 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4534
4535 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4536
4537 /*
4538 * psg->page/offset are used to copy to-be-written
4539 * data in this function or read data in ata_sg_clean.
4540 */
4541 offset = lsg->offset + lsg->length - qc->pad_len;
4542 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4543 psg->offset = offset_in_page(offset);
4544
4545 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4546 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4547 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4548 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4549 }
4550
4551 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4552 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4553 /* trim last sg */
4554 lsg->length -= qc->pad_len;
e1410f2d
JG
4555 if (lsg->length == 0)
4556 trim_sg = 1;
cedc9a47
JG
4557
4558 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4559 qc->n_elem - 1, lsg->length, qc->pad_len);
4560 }
4561
e1410f2d
JG
4562 pre_n_elem = qc->n_elem;
4563 if (trim_sg && pre_n_elem)
4564 pre_n_elem--;
4565
4566 if (!pre_n_elem) {
4567 n_elem = 0;
4568 goto skip_map;
4569 }
4570
1da177e4 4571 dir = qc->dma_dir;
2f1f610b 4572 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4573 if (n_elem < 1) {
4574 /* restore last sg */
4575 lsg->length += qc->pad_len;
1da177e4 4576 return -1;
537a95d9 4577 }
1da177e4
LT
4578
4579 DPRINTK("%d sg elements mapped\n", n_elem);
4580
e1410f2d 4581skip_map:
1da177e4
LT
4582 qc->n_elem = n_elem;
4583
4584 return 0;
4585}
4586
0baab86b 4587/**
c893a3ae 4588 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4589 * @buf: Buffer to swap
4590 * @buf_words: Number of 16-bit words in buffer.
4591 *
4592 * Swap halves of 16-bit words if needed to convert from
4593 * little-endian byte order to native cpu byte order, or
4594 * vice-versa.
4595 *
4596 * LOCKING:
6f0ef4fa 4597 * Inherited from caller.
0baab86b 4598 */
1da177e4
LT
4599void swap_buf_le16(u16 *buf, unsigned int buf_words)
4600{
4601#ifdef __BIG_ENDIAN
4602 unsigned int i;
4603
4604 for (i = 0; i < buf_words; i++)
4605 buf[i] = le16_to_cpu(buf[i]);
4606#endif /* __BIG_ENDIAN */
4607}
4608
6ae4cfb5 4609/**
0d5ff566 4610 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4611 * @adev: device to target
6ae4cfb5
AL
4612 * @buf: data buffer
4613 * @buflen: buffer length
344babaa 4614 * @write_data: read/write
6ae4cfb5
AL
4615 *
4616 * Transfer data from/to the device data register by PIO.
4617 *
4618 * LOCKING:
4619 * Inherited from caller.
6ae4cfb5 4620 */
0d5ff566
TH
4621void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4622 unsigned int buflen, int write_data)
1da177e4 4623{
9af5c9c9 4624 struct ata_port *ap = adev->link->ap;
6ae4cfb5 4625 unsigned int words = buflen >> 1;
1da177e4 4626
6ae4cfb5 4627 /* Transfer multiple of 2 bytes */
1da177e4 4628 if (write_data)
0d5ff566 4629 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4630 else
0d5ff566 4631 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4632
4633 /* Transfer trailing 1 byte, if any. */
4634 if (unlikely(buflen & 0x01)) {
4635 u16 align_buf[1] = { 0 };
4636 unsigned char *trailing_buf = buf + buflen - 1;
4637
4638 if (write_data) {
4639 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4640 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4641 } else {
0d5ff566 4642 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4643 memcpy(trailing_buf, align_buf, 1);
4644 }
4645 }
1da177e4
LT
4646}
4647
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with local interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;

	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
4669
4670
6ae4cfb5 4671/**
5a5dbd18 4672 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4673 * @qc: Command on going
4674 *
5a5dbd18 4675 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4676 *
4677 * LOCKING:
4678 * Inherited from caller.
4679 */
4680
1da177e4
LT
4681static void ata_pio_sector(struct ata_queued_cmd *qc)
4682{
4683 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4684 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4685 struct ata_port *ap = qc->ap;
4686 struct page *page;
4687 unsigned int offset;
4688 unsigned char *buf;
4689
5a5dbd18 4690 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4691 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4692
4693 page = sg[qc->cursg].page;
726f0785 4694 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4695
4696 /* get the current page and offset */
4697 page = nth_page(page, (offset >> PAGE_SHIFT));
4698 offset %= PAGE_SIZE;
4699
1da177e4
LT
4700 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4701
91b8b313
AL
4702 if (PageHighMem(page)) {
4703 unsigned long flags;
4704
a6b2c5d4 4705 /* FIXME: use a bounce buffer */
91b8b313
AL
4706 local_irq_save(flags);
4707 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4708
91b8b313 4709 /* do the actual data transfer */
5a5dbd18 4710 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4711
91b8b313
AL
4712 kunmap_atomic(buf, KM_IRQ0);
4713 local_irq_restore(flags);
4714 } else {
4715 buf = page_address(page);
5a5dbd18 4716 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4717 }
1da177e4 4718
5a5dbd18
ML
4719 qc->curbytes += qc->sect_size;
4720 qc->cursg_ofs += qc->sect_size;
1da177e4 4721
726f0785 4722 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4723 qc->cursg++;
4724 qc->cursg_ofs = 0;
4725 }
1da177e4 4726}
1da177e4 4727
07f6f7d0 4728/**
5a5dbd18 4729 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4730 * @qc: Command on going
4731 *
5a5dbd18 4732 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4733 * ATA device for the DRQ request.
4734 *
4735 * LOCKING:
4736 * Inherited from caller.
4737 */
1da177e4 4738
07f6f7d0
AL
4739static void ata_pio_sectors(struct ata_queued_cmd *qc)
4740{
4741 if (is_multi_taskfile(&qc->tf)) {
4742 /* READ/WRITE MULTIPLE */
4743 unsigned int nsect;
4744
587005de 4745 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4746
5a5dbd18 4747 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4748 qc->dev->multi_count);
07f6f7d0
AL
4749 while (nsect--)
4750 ata_pio_sector(qc);
4751 } else
4752 ata_pio_sector(qc);
4cc980b3
AL
4753
4754 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
4755}
4756
c71c1857
AL
4757/**
4758 * atapi_send_cdb - Write CDB bytes to hardware
4759 * @ap: Port to which ATAPI device is attached.
4760 * @qc: Taskfile currently active
4761 *
4762 * When device has indicated its readiness to accept
4763 * a CDB, this function is called. Send the CDB.
4764 *
4765 * LOCKING:
4766 * caller.
4767 */
4768
4769static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4770{
4771 /* send SCSI cdb */
4772 DPRINTK("send cdb\n");
db024d53 4773 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4774
a6b2c5d4 4775 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4776 ata_altstatus(ap); /* flush */
4777
4778 switch (qc->tf.protocol) {
4779 case ATA_PROT_ATAPI:
4780 ap->hsm_task_state = HSM_ST;
4781 break;
4782 case ATA_PROT_ATAPI_NODATA:
4783 ap->hsm_task_state = HSM_ST_LAST;
4784 break;
4785 case ATA_PROT_ATAPI_DMA:
4786 ap->hsm_task_state = HSM_ST_LAST;
4787 /* initiate bmdma */
4788 ap->ops->bmdma_start(qc);
4789 break;
4790 }
1da177e4
LT
4791}
4792
6ae4cfb5
AL
4793/**
4794 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4795 * @qc: Command on going
4796 * @bytes: number of bytes
4797 *
4798 * Transfer Transfer data from/to the ATAPI device.
4799 *
4800 * LOCKING:
4801 * Inherited from caller.
4802 *
4803 */
4804
1da177e4
LT
4805static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4806{
4807 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4808 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4809 struct ata_port *ap = qc->ap;
4810 struct page *page;
4811 unsigned char *buf;
4812 unsigned int offset, count;
4813
563a6e1f 4814 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4815 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4816
4817next_sg:
563a6e1f 4818 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4819 /*
563a6e1f
AL
4820 * The end of qc->sg is reached and the device expects
4821 * more data to transfer. In order not to overrun qc->sg
4822 * and fulfill length specified in the byte count register,
4823 * - for read case, discard trailing data from the device
4824 * - for write case, padding zero data to the device
4825 */
4826 u16 pad_buf[1] = { 0 };
4827 unsigned int words = bytes >> 1;
4828 unsigned int i;
4829
4830 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4831 ata_dev_printk(qc->dev, KERN_WARNING,
4832 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4833
4834 for (i = 0; i < words; i++)
a6b2c5d4 4835 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4836
14be71f4 4837 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4838 return;
4839 }
4840
cedc9a47 4841 sg = &qc->__sg[qc->cursg];
1da177e4 4842
1da177e4
LT
4843 page = sg->page;
4844 offset = sg->offset + qc->cursg_ofs;
4845
4846 /* get the current page and offset */
4847 page = nth_page(page, (offset >> PAGE_SHIFT));
4848 offset %= PAGE_SIZE;
4849
6952df03 4850 /* don't overrun current sg */
32529e01 4851 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4852
4853 /* don't cross page boundaries */
4854 count = min(count, (unsigned int)PAGE_SIZE - offset);
4855
7282aa4b
AL
4856 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4857
91b8b313
AL
4858 if (PageHighMem(page)) {
4859 unsigned long flags;
4860
a6b2c5d4 4861 /* FIXME: use bounce buffer */
91b8b313
AL
4862 local_irq_save(flags);
4863 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4864
91b8b313 4865 /* do the actual data transfer */
a6b2c5d4 4866 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4867
91b8b313
AL
4868 kunmap_atomic(buf, KM_IRQ0);
4869 local_irq_restore(flags);
4870 } else {
4871 buf = page_address(page);
a6b2c5d4 4872 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4873 }
1da177e4
LT
4874
4875 bytes -= count;
4876 qc->curbytes += count;
4877 qc->cursg_ofs += count;
4878
32529e01 4879 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4880 qc->cursg++;
4881 qc->cursg_ofs = 0;
4882 }
4883
563a6e1f 4884 if (bytes)
1da177e4 4885 goto next_sg;
1da177e4
LT
4886}
4887
6ae4cfb5
AL
4888/**
4889 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4890 * @qc: Command on going
4891 *
4892 * Transfer Transfer data from/to the ATAPI device.
4893 *
4894 * LOCKING:
4895 * Inherited from caller.
6ae4cfb5
AL
4896 */
4897
1da177e4
LT
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	/* direction the host expects, per the issued taskfile */
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	/* interrupt reason and byte count come back in the nsect/lbam/lbah
	 * registers during an ATAPI PIO transfer
	 */
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	/* bad ireason from the device: flag an HSM violation so EH
	 * aborts the command or resets the device
	 */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4938
4939/**
c234fb00
AL
4940 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4941 * @ap: the target ata_port
4942 * @qc: qc on going
1da177e4 4943 *
c234fb00
AL
4944 * RETURNS:
4945 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4946 */
c234fb00
AL
4947
4948static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4949{
c234fb00
AL
4950 if (qc->tf.flags & ATA_TFLAG_POLLING)
4951 return 1;
1da177e4 4952
c234fb00
AL
4953 if (ap->hsm_task_state == HSM_ST_FIRST) {
4954 if (qc->tf.protocol == ATA_PROT_PIO &&
4955 (qc->tf.flags & ATA_TFLAG_WRITE))
4956 return 1;
1da177e4 4957
c234fb00
AL
4958 if (is_atapi_taskfile(&qc->tf) &&
4959 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4960 return 1;
fe79e683
AL
4961 }
4962
c234fb00
AL
4963 return 0;
4964}
1da177e4 4965
c17ea20d
TH
4966/**
4967 * ata_hsm_qc_complete - finish a qc running on standard HSM
4968 * @qc: Command to complete
4969 * @in_wq: 1 if called from workqueue, 0 otherwise
4970 *
4971 * Finish @qc which is running on standard HSM.
4972 *
4973 * LOCKING:
cca3974e 4974 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4975 * Otherwise, none on entry and grabs host lock.
4976 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new EH */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable the interrupt that was
					 * masked while polling from the wq
					 */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					/* HSM violation: freeze the port and
					 * let EH sort it out
					 */
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* interrupt context already holds the host lock */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old EH: no freeze support, just complete */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5015
bb5cb290
AL
5016/**
5017 * ata_hsm_move - move the HSM to the next state.
5018 * @ap: the target ata_port
5019 * @qc: qc on going
5020 * @status: current device status
5021 * @in_wq: 1 if called from workqueue, 0 otherwise
5022 *
5023 * RETURNS:
5024 * 1 when poll next status needed, 0 otherwise.
5025 */
9a1004d0
TH
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					/* drain one block of junk data */
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			/* device reported error at command completion */
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5249
/* ata_pio_task - workqueue entry point that drives the polled HSM.
 * Busy-waits briefly for BSY to clear, requeues itself (delayed) if the
 * device stays busy, and otherwise steps the HSM until it no longer asks
 * to be polled.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: back off and retry later */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5287
1da177e4
LT
5288/**
5289 * ata_qc_new - Request an available ATA command, for queueing
5290 * @ap: Port associated with device @dev
5291 * @dev: Device from whom we request an available command structure
5292 *
5293 * LOCKING:
0cba632b 5294 * None.
1da177e4
LT
5295 */
5296
5297static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5298{
5299 struct ata_queued_cmd *qc = NULL;
5300 unsigned int i;
5301
e3180499 5302 /* no command while frozen */
b51e9e5d 5303 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5304 return NULL;
5305
2ab7db1f
TH
5306 /* the last tag is reserved for internal command. */
5307 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5308 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5309 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5310 break;
5311 }
5312
5313 if (qc)
5314 qc->tag = i;
5315
5316 return qc;
5317}
5318
5319/**
5320 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5321 * @dev: Device from whom we request an available command structure
5322 *
5323 * LOCKING:
0cba632b 5324 * None.
1da177e4
LT
5325 */
5326
3373efd8 5327struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5328{
9af5c9c9 5329 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5330 struct ata_queued_cmd *qc;
5331
5332 qc = ata_qc_new(ap);
5333 if (qc) {
1da177e4
LT
5334 qc->scsicmd = NULL;
5335 qc->ap = ap;
5336 qc->dev = dev;
1da177e4 5337
2c13b7ce 5338 ata_qc_reinit(qc);
1da177e4
LT
5339 }
5340
5341 return qc;
5342}
5343
1da177e4
LT
5344/**
5345 * ata_qc_free - free unused ata_queued_cmd
5346 * @qc: Command to complete
5347 *
5348 * Designed to free unused ata_queued_cmd object
5349 * in case something prevents using it.
5350 *
5351 * LOCKING:
cca3974e 5352 * spin_lock_irqsave(host lock)
1da177e4
LT
5353 */
5354void ata_qc_free(struct ata_queued_cmd *qc)
5355{
4ba946e9
TH
5356 struct ata_port *ap = qc->ap;
5357 unsigned int tag;
5358
a4631474 5359 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5360
4ba946e9
TH
5361 qc->flags = 0;
5362 tag = qc->tag;
5363 if (likely(ata_tag_valid(tag))) {
4ba946e9 5364 qc->tag = ATA_TAG_POISON;
6cec4a39 5365 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5366 }
1da177e4
LT
5367}
5368
/* __ata_qc_complete - low-level qc completion: unmap DMA, clear the
 * active-tag bookkeeping, and invoke the completion callback.
 * Caller must hold the host lock (same rule as ata_qc_complete()).
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		link->sactive &= ~(1 << qc->tag);
	else
		link->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5396
39599a53
TH
5397static void fill_result_tf(struct ata_queued_cmd *qc)
5398{
5399 struct ata_port *ap = qc->ap;
5400
39599a53 5401 qc->result_tf.flags = qc->tf.flags;
4742d54f 5402 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5403}
5404
f686bcb8
TH
5405/**
5406 * ata_qc_complete - Complete an active ATA command
5407 * @qc: Command to complete
5408 * @err_mask: ATA Status register contents
5409 *
5410 * Indicate to the mid and upper layers that an ATA
5411 * command has completed, with either an ok or not-ok status.
5412 *
5413 * LOCKING:
cca3974e 5414 * spin_lock_irqsave(host lock)
f686bcb8
TH
5415 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new EH path: completing a command on a frozen port
		 * would race with EH
		 */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are not routed to EH;
			 * their callers inspect result_tf directly
			 */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH: drop completion requests EH already owns */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5464
dedaf2b0
TH
5465/**
5466 * ata_qc_complete_multiple - Complete multiple qcs successfully
5467 * @ap: port in question
5468 * @qc_active: new qc_active mask
5469 * @finish_qc: LLDD callback invoked before completing a qc
5470 *
5471 * Complete in-flight commands. This functions is meant to be
5472 * called from low-level driver's interrupt routine to complete
5473 * requests normally. ap->qc_active and @qc_active is compared
5474 * and commands are completed accordingly.
5475 *
5476 * LOCKING:
cca3974e 5477 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5478 *
5479 * RETURNS:
5480 * Number of completed commands on success, -errno otherwise.
5481 */
5482int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5483 void (*finish_qc)(struct ata_queued_cmd *))
5484{
5485 int nr_done = 0;
5486 u32 done_mask;
5487 int i;
5488
5489 done_mask = ap->qc_active ^ qc_active;
5490
5491 if (unlikely(done_mask & qc_active)) {
5492 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5493 "(%08x->%08x)\n", ap->qc_active, qc_active);
5494 return -EINVAL;
5495 }
5496
5497 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5498 struct ata_queued_cmd *qc;
5499
5500 if (!(done_mask & (1 << i)))
5501 continue;
5502
5503 if ((qc = ata_qc_from_tag(ap, i))) {
5504 if (finish_qc)
5505 finish_qc(qc);
5506 ata_qc_complete(qc);
5507 nr_done++;
5508 }
5509 }
5510
5511 return nr_done;
5512}
5513
1da177e4
LT
5514static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5515{
5516 struct ata_port *ap = qc->ap;
5517
5518 switch (qc->tf.protocol) {
3dc1d881 5519 case ATA_PROT_NCQ:
1da177e4
LT
5520 case ATA_PROT_DMA:
5521 case ATA_PROT_ATAPI_DMA:
5522 return 1;
5523
5524 case ATA_PROT_ATAPI:
5525 case ATA_PROT_PIO:
1da177e4
LT
5526 if (ap->flags & ATA_FLAG_PIO_DMA)
5527 return 1;
5528
5529 /* fall through */
5530
5531 default:
5532 return 0;
5533 }
5534
5535 /* never reached */
5536}
5537
5538/**
5539 * ata_qc_issue - issue taskfile to device
5540 * @qc: command to issue to device
5541 *
5542 * Prepare an ATA command to submission to device.
5543 * This includes mapping the data into a DMA-able
5544 * area, filling in the S/G table, and finally
5545 * writing the taskfile to hardware, starting the command.
5546 *
5547 * LOCKING:
cca3974e 5548 * spin_lock_irqsave(host lock)
1da177e4 5549 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding. The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* record the command as in-flight on its link */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* map the data buffer for DMA-capable protocols */
	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G mapping failed: report a system error and complete,
	 * which routes the qc to EH
	 */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5597
5598/**
5599 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5600 * @qc: command to issue to device
5601 *
5602 * Using various libata functions and hooks, this function
5603 * starts an ATA command. ATA commands are grouped into
5604 * classes called "protocols", and issuing each type of protocol
5605 * is slightly different.
5606 *
0baab86b
EF
5607 * May be used as the qc_issue() entry in ata_port_operations.
5608 *
1da177e4 5609 * LOCKING:
cca3974e 5610 * spin_lock_irqsave(host lock)
1da177e4
LT
5611 *
5612 * RETURNS:
9a3d9eb0 5613 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5614 */
5615
unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		/* polled completion runs from the PIO task */
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		/* DMA is always interrupt driven */
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		/* unknown protocol: reject the command */
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
5729
1da177e4
LT
5730/**
5731 * ata_host_intr - Handle host interrupt for given (port, task)
5732 * @ap: Port on which interrupt arrived (possibly...)
5733 * @qc: Taskfile currently active in engine
5734 *
5735 * Handle host interrupt for given queued command. Currently,
5736 * only DMA interrupts are handled. All other commands are
5737 * handled via polling with interrupts disabled (nIEN bit).
5738 *
5739 * LOCKING:
cca3974e 5740 * spin_lock_irqsave(host lock)
1da177e4
LT
5741 *
5742 * RETURNS:
5743 * One if interrupt was handled, zero if not (shared irq).
5744 */
5745
inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus - reading altstatus does not clear INTRQ */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if the DMA transfer failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	/* debug aid: a storm of unexpected interrupts probably means a
	 * stuck INTRQ line; ack it and warn
	 */
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5831
5832/**
5833 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5834 * @irq: irq line (unused)
cca3974e 5835 * @dev_instance: pointer to our ata_host information structure
1da177e4 5836 *
0cba632b
JG
5837 * Default interrupt handler for PCI IDE devices. Calls
5838 * ata_host_intr() for each port that is not disabled.
5839 *
1da177e4 5840 * LOCKING:
cca3974e 5841 * Obtains host lock during operation.
1da177e4
LT
5842 *
5843 * RETURNS:
0cba632b 5844 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5845 */
5846
7d12e780 5847irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5848{
cca3974e 5849 struct ata_host *host = dev_instance;
1da177e4
LT
5850 unsigned int i;
5851 unsigned int handled = 0;
5852 unsigned long flags;
5853
5854 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5855 spin_lock_irqsave(&host->lock, flags);
1da177e4 5856
cca3974e 5857 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5858 struct ata_port *ap;
5859
cca3974e 5860 ap = host->ports[i];
c1389503 5861 if (ap &&
029f5468 5862 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5863 struct ata_queued_cmd *qc;
5864
9af5c9c9 5865 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 5866 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5867 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5868 handled |= ata_host_intr(ap, qc);
5869 }
5870 }
5871
cca3974e 5872 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5873
5874 return IRQ_RETVAL(handled);
5875}
5876
34bf2170
TH
5877/**
5878 * sata_scr_valid - test whether SCRs are accessible
936fd732 5879 * @link: ATA link to test SCR accessibility for
34bf2170 5880 *
936fd732 5881 * Test whether SCRs are accessible for @link.
34bf2170
TH
5882 *
5883 * LOCKING:
5884 * None.
5885 *
5886 * RETURNS:
5887 * 1 if SCRs are accessible, 0 otherwise.
5888 */
936fd732 5889int sata_scr_valid(struct ata_link *link)
34bf2170 5890{
936fd732
TH
5891 struct ata_port *ap = link->ap;
5892
a16abc0b 5893 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5894}
5895
5896/**
5897 * sata_scr_read - read SCR register of the specified port
936fd732 5898 * @link: ATA link to read SCR for
34bf2170
TH
5899 * @reg: SCR to read
5900 * @val: Place to store read value
5901 *
936fd732 5902 * Read SCR register @reg of @link into *@val. This function is
34bf2170
TH
5903 * guaranteed to succeed if the cable type of the port is SATA
5904 * and the port implements ->scr_read.
5905 *
5906 * LOCKING:
5907 * None.
5908 *
5909 * RETURNS:
5910 * 0 on success, negative errno on failure.
5911 */
936fd732 5912int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5913{
936fd732
TH
5914 struct ata_port *ap = link->ap;
5915
5916 if (sata_scr_valid(link))
da3dbb17 5917 return ap->ops->scr_read(ap, reg, val);
34bf2170
TH
5918 return -EOPNOTSUPP;
5919}
5920
5921/**
5922 * sata_scr_write - write SCR register of the specified port
936fd732 5923 * @link: ATA link to write SCR for
34bf2170
TH
5924 * @reg: SCR to write
5925 * @val: value to write
5926 *
936fd732 5927 * Write @val to SCR register @reg of @link. This function is
34bf2170
TH
5928 * guaranteed to succeed if the cable type of the port is SATA
5929 * and the port implements ->scr_read.
5930 *
5931 * LOCKING:
5932 * None.
5933 *
5934 * RETURNS:
5935 * 0 on success, negative errno on failure.
5936 */
936fd732 5937int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 5938{
936fd732
TH
5939 struct ata_port *ap = link->ap;
5940
5941 if (sata_scr_valid(link))
da3dbb17 5942 return ap->ops->scr_write(ap, reg, val);
34bf2170
TH
5943 return -EOPNOTSUPP;
5944}
5945
5946/**
5947 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 5948 * @link: ATA link to write SCR for
34bf2170
TH
5949 * @reg: SCR to write
5950 * @val: value to write
5951 *
5952 * This function is identical to sata_scr_write() except that this
5953 * function performs flush after writing to the register.
5954 *
5955 * LOCKING:
5956 * None.
5957 *
5958 * RETURNS:
5959 * 0 on success, negative errno on failure.
5960 */
936fd732 5961int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 5962{
936fd732 5963 struct ata_port *ap = link->ap;
da3dbb17
TH
5964 int rc;
5965
936fd732 5966 if (sata_scr_valid(link)) {
da3dbb17
TH
5967 rc = ap->ops->scr_write(ap, reg, val);
5968 if (rc == 0)
5969 rc = ap->ops->scr_read(ap, reg, &val);
5970 return rc;
34bf2170
TH
5971 }
5972 return -EOPNOTSUPP;
5973}
5974
5975/**
936fd732
TH
5976 * ata_link_online - test whether the given link is online
5977 * @link: ATA link to test
34bf2170 5978 *
936fd732
TH
5979 * Test whether @link is online. Note that this function returns
5980 * 0 if online status of @link cannot be obtained, so
5981 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5982 *
5983 * LOCKING:
5984 * None.
5985 *
5986 * RETURNS:
5987 * 1 if the port online status is available and online.
5988 */
936fd732 5989int ata_link_online(struct ata_link *link)
34bf2170
TH
5990{
5991 u32 sstatus;
5992
936fd732
TH
5993 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5994 (sstatus & 0xf) == 0x3)
34bf2170
TH
5995 return 1;
5996 return 0;
5997}
5998
5999/**
936fd732
TH
6000 * ata_link_offline - test whether the given link is offline
6001 * @link: ATA link to test
34bf2170 6002 *
936fd732
TH
6003 * Test whether @link is offline. Note that this function
6004 * returns 0 if offline status of @link cannot be obtained, so
6005 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6006 *
6007 * LOCKING:
6008 * None.
6009 *
6010 * RETURNS:
6011 * 1 if the port offline status is available and offline.
6012 */
936fd732 6013int ata_link_offline(struct ata_link *link)
34bf2170
TH
6014{
6015 u32 sstatus;
6016
936fd732
TH
6017 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6018 (sstatus & 0xf) != 0x3)
34bf2170
TH
6019 return 1;
6020 return 0;
6021}
0baab86b 6022
77b08fb5 6023int ata_flush_cache(struct ata_device *dev)
9b847548 6024{
977e6b9f 6025 unsigned int err_mask;
9b847548
JA
6026 u8 cmd;
6027
6028 if (!ata_try_flush_cache(dev))
6029 return 0;
6030
6fc49adb 6031 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6032 cmd = ATA_CMD_FLUSH_EXT;
6033 else
6034 cmd = ATA_CMD_FLUSH;
6035
4f34337b
AC
6036 /* This is wrong. On a failed flush we get back the LBA of the lost
6037 sector and we should (assuming it wasn't aborted as unknown) issue
6038 a further flush command to continue the writeback until it
6039 does not error */
977e6b9f
TH
6040 err_mask = ata_do_simple_cmd(dev, cmd);
6041 if (err_mask) {
6042 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6043 return -EIO;
6044 }
6045
6046 return 0;
9b847548
JA
6047}
6048
#ifdef CONFIG_PM
/*
 * ata_host_request_pm - request EH-driven PM operation for all ports
 * @host: host to operate on
 * @mesg: PM message to hand to each port's EH
 * @action: EH action bits to set on every link
 * @ehi_flags: EH info flags to set on every link
 * @wait: when non-zero, wait for each port's EH to finish and
 *	  propagate the first non-zero pm_result
 *
 * Actual suspend/resume work is done by EH; this function only
 * records the request under ap->lock, kicks EH and (optionally)
 * waits per port.  RETURNS 0 on success, first EH error otherwise.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6099
6100/**
cca3974e
JG
6101 * ata_host_suspend - suspend host
6102 * @host: host to suspend
500530f6
TH
6103 * @mesg: PM message
6104 *
cca3974e 6105 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6106 * function requests EH to perform PM operations and waits for EH
6107 * to finish.
6108 *
6109 * LOCKING:
6110 * Kernel thread context (may sleep).
6111 *
6112 * RETURNS:
6113 * 0 on success, -errno on failure.
6114 */
cca3974e 6115int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6116{
9666f400 6117 int rc;
500530f6 6118
cca3974e 6119 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6120 if (rc == 0)
6121 host->dev->power.power_state = mesg;
500530f6
TH
6122 return rc;
6123}
6124
6125/**
cca3974e
JG
6126 * ata_host_resume - resume host
6127 * @host: host to resume
500530f6 6128 *
cca3974e 6129 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6130 * function requests EH to perform PM operations and returns.
6131 * Note that all resume operations are performed parallely.
6132 *
6133 * LOCKING:
6134 * Kernel thread context (may sleep).
6135 */
cca3974e 6136void ata_host_resume(struct ata_host *host)
500530f6 6137{
cca3974e
JG
6138 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6139 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6140 host->dev->power.power_state = PMSG_ON;
500530f6 6141}
6ffa01d8 6142#endif
500530f6 6143
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -ENOMEM if the PRD table cannot be allocated,
 *	or the error from ata_pad_alloc().
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* devres-managed DMA memory: released automatically with @dev */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
6174
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Zero everything from ATA_DEVICE_CLEAR_OFFSET onwards;
	 * fields in front of that offset (link, devno, the flags
	 * handled above) survive re-initialization.
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* start fully permissive; probing narrows these masks */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6209
4fb37a25
TH
6210/**
6211 * ata_link_init - Initialize an ata_link structure
6212 * @ap: ATA port link is attached to
6213 * @link: Link structure to initialize
8989805d 6214 * @pmp: Port multiplier port number
4fb37a25
TH
6215 *
6216 * Initialize @link.
6217 *
6218 * LOCKING:
6219 * Kernel thread context (may sleep)
6220 */
fb7fd614 6221void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6222{
6223 int i;
6224
6225 /* clear everything except for devices */
6226 memset(link, 0, offsetof(struct ata_link, device[0]));
6227
6228 link->ap = ap;
8989805d 6229 link->pmp = pmp;
4fb37a25
TH
6230 link->active_tag = ATA_TAG_POISON;
6231 link->hw_sata_spd_limit = UINT_MAX;
6232
6233 /* can't use iterator, ap isn't initialized yet */
6234 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6235 struct ata_device *dev = &link->device[i];
6236
6237 dev->link = link;
6238 dev->devno = dev - link->device;
6239 ata_dev_init(dev);
6240 }
6241}
6242
6243/**
6244 * sata_link_init_spd - Initialize link->sata_spd_limit
6245 * @link: Link to configure sata_spd_limit for
6246 *
6247 * Initialize @link->[hw_]sata_spd_limit to the currently
6248 * configured value.
6249 *
6250 * LOCKING:
6251 * Kernel thread context (may sleep).
6252 *
6253 * RETURNS:
6254 * 0 on success, -errno on failure.
6255 */
fb7fd614 6256int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6257{
6258 u32 scontrol, spd;
6259 int rc;
6260
6261 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6262 if (rc)
6263 return rc;
6264
6265 spd = (scontrol >> 4) & 0xf;
6266 if (spd)
6267 link->hw_sata_spd_limit &= (1 << spd) - 1;
6268
6269 link->sata_spd_limit = link->hw_sata_spd_limit;
6270
6271 return 0;
6272}
6273
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* INITIALIZING is cleared by ata_host_register() once probing starts */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;	/* ports share the host lock */
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;	/* real id assigned at registration */
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	/* deferrable: fastdrain may fire late without waking the CPU */
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* host link is PMP port 0 */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6333
/*
 * ata_host_release - devres release callback for an ATA host
 * @gendev: generic device the host is attached to
 * @res: devres payload (unused; host is found via drvdata)
 *
 * Stops all ports and the host (only if the host was actually
 * started), then frees per-port resources.  Ordering matters:
 * every port is stopped before any port memory is released.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	/* pass 1: stop all ports while they are all still valid */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	/* pass 2: release per-port memory */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6367
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * the extra slot keeps host->ports[] NULL-terminated */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	/* releasing the group tears down everything allocated above */
	devres_release_group(dev, NULL);
	return NULL;
}
6432
f5cda257
TH
6433/**
6434 * ata_host_alloc_pinfo - alloc host and init with port_info array
6435 * @dev: generic device this host is associated with
6436 * @ppi: array of ATA port_info to initialize host with
6437 * @n_ports: number of ATA ports attached to this host
6438 *
6439 * Allocate ATA host and initialize with info from @ppi. If NULL
6440 * terminated, @ppi may contain fewer entries than @n_ports. The
6441 * last entry will be used for the remaining ports.
6442 *
6443 * RETURNS:
6444 * Allocate ATA host on success, NULL on failure.
6445 *
6446 * LOCKING:
6447 * Inherited from calling layer (may sleep).
6448 */
6449struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6450 const struct ata_port_info * const * ppi,
6451 int n_ports)
6452{
6453 const struct ata_port_info *pi;
6454 struct ata_host *host;
6455 int i, j;
6456
6457 host = ata_host_alloc(dev, n_ports);
6458 if (!host)
6459 return NULL;
6460
6461 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6462 struct ata_port *ap = host->ports[i];
6463
6464 if (ppi[j])
6465 pi = ppi[j++];
6466
6467 ap->pio_mask = pi->pio_mask;
6468 ap->mwdma_mask = pi->mwdma_mask;
6469 ap->udma_mask = pi->udma_mask;
6470 ap->flags |= pi->flags;
0c88758b 6471 ap->link.flags |= pi->link_flags;
f5cda257
TH
6472 ap->ops = pi->port_ops;
6473
6474 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6475 host->ops = pi->port_ops;
6476 if (!host->private_data && pi->private_data)
6477 host->private_data = pi->private_data;
6478 }
6479
6480 return host;
6481}
6482
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	/* idempotent: a started host is never started twice */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop only the ports that were already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
6536
b03732f0 6537/**
cca3974e
JG
6538 * ata_sas_host_init - Initialize a host struct
6539 * @host: host to initialize
6540 * @dev: device host is attached to
6541 * @flags: host flags
6542 * @ops: port_ops
b03732f0
BK
6543 *
6544 * LOCKING:
6545 * PCI/etc. bus probe sem.
6546 *
6547 */
f3187195 6548/* KILLME - the only user left is ipr */
cca3974e
JG
6549void ata_host_init(struct ata_host *host, struct device *dev,
6550 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6551{
cca3974e
JG
6552 spin_lock_init(&host->lock);
6553 host->dev = dev;
6554 host->flags = flags;
6555 host->ops = ops;
b03732f0
BK
6556}
6557
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.  The ports[] array is NULL-terminated,
	 * so this loop stops at the first unused slot's end.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;	/* NOTE: shadows the outer rc on purpose;
			 * probe failures are not propagated */

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
	}

	return 0;
}
6685
f5cda257
TH
6686/**
6687 * ata_host_activate - start host, request IRQ and register it
6688 * @host: target ATA host
6689 * @irq: IRQ to request
6690 * @irq_handler: irq_handler used when requesting IRQ
6691 * @irq_flags: irq_flags used when requesting IRQ
6692 * @sht: scsi_host_template to use when registering the host
6693 *
6694 * After allocating an ATA host and initializing it, most libata
6695 * LLDs perform three steps to activate the host - start host,
6696 * request IRQ and register it. This helper takes necessasry
6697 * arguments and performs the three steps in one go.
6698 *
6699 * LOCKING:
6700 * Inherited from calling layer (may sleep).
6701 *
6702 * RETURNS:
6703 * 0 on success, -errno otherwise.
6704 */
6705int ata_host_activate(struct ata_host *host, int irq,
6706 irq_handler_t irq_handler, unsigned long irq_flags,
6707 struct scsi_host_template *sht)
6708{
cbcdd875 6709 int i, rc;
f5cda257
TH
6710
6711 rc = ata_host_start(host);
6712 if (rc)
6713 return rc;
6714
6715 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6716 dev_driver_string(host->dev), host);
6717 if (rc)
6718 return rc;
6719
cbcdd875
TH
6720 for (i = 0; i < host->n_ports; i++)
6721 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 6722
f5cda257
TH
6723 rc = ata_host_register(host, sht);
6724 /* if failed, just free the IRQ and leave ports alone */
6725 if (rc)
6726 devm_free_irq(host->dev, irq, host);
6727
6728 return rc;
6729}
6730
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* ports without new-style EH skip straight to SCSI host removal */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* stop any pending hotplug rescans before tearing down */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6785
0529c159
TH
6786/**
6787 * ata_host_detach - Detach all ports of an ATA host
6788 * @host: Host to detach
6789 *
6790 * Detach all ports of @host.
6791 *
6792 * LOCKING:
6793 * Kernel thread context (may sleep).
6794 */
6795void ata_host_detach(struct ata_host *host)
6796{
6797 int i;
6798
6799 for (i = 0; i < host->n_ports; i++)
6800 ata_port_detach(host->ports[i]);
6801}
6802
1da177e4
LT
6803/**
6804 * ata_std_ports - initialize ioaddr with standard port offsets.
6805 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6806 *
6807 * Utility function which initializes data_addr, error_addr,
6808 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6809 * device_addr, status_addr, and command_addr to standard offsets
6810 * relative to cmd_addr.
6811 *
6812 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6813 */
0baab86b 6814
1da177e4
LT
6815void ata_std_ports(struct ata_ioports *ioaddr)
6816{
6817 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6818 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6819 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6820 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6821 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6822 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6823 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6824 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6825 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6826 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6827}
6828
0baab86b 6829
374b1873
JG
6830#ifdef CONFIG_PCI
6831
1da177e4
LT
6832/**
6833 * ata_pci_remove_one - PCI layer callback for device removal
6834 * @pdev: PCI device that was removed
6835 *
b878ca5d
TH
6836 * PCI layer indicates to libata via this hook that hot-unplug or
6837 * module unload event has occurred. Detach all ports. Resource
6838 * release is handled via devres.
1da177e4
LT
6839 *
6840 * LOCKING:
6841 * Inherited from PCI layer (may sleep).
6842 */
f0d36efd 6843void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
6844{
6845 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6846 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6847
b878ca5d 6848 ata_host_detach(host);
1da177e4
LT
6849}
6850
6851/* move to PCI subsystem */
057ace5e 6852int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6853{
6854 unsigned long tmp = 0;
6855
6856 switch (bits->width) {
6857 case 1: {
6858 u8 tmp8 = 0;
6859 pci_read_config_byte(pdev, bits->reg, &tmp8);
6860 tmp = tmp8;
6861 break;
6862 }
6863 case 2: {
6864 u16 tmp16 = 0;
6865 pci_read_config_word(pdev, bits->reg, &tmp16);
6866 tmp = tmp16;
6867 break;
6868 }
6869 case 4: {
6870 u32 tmp32 = 0;
6871 pci_read_config_dword(pdev, bits->reg, &tmp32);
6872 tmp = tmp32;
6873 break;
6874 }
6875
6876 default:
6877 return -EINVAL;
6878 }
6879
6880 tmp &= bits->mask;
6881
6882 return (tmp == bits->val) ? 1 : 0;
6883}
9b847548 6884
6ffa01d8 6885#ifdef CONFIG_PM
3c5100c1 6886void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6887{
6888 pci_save_state(pdev);
4c90d971 6889 pci_disable_device(pdev);
500530f6 6890
4c90d971 6891 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6892 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6893}
6894
553c4aa6 6895int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6896{
553c4aa6
TH
6897 int rc;
6898
9b847548
JA
6899 pci_set_power_state(pdev, PCI_D0);
6900 pci_restore_state(pdev);
553c4aa6 6901
b878ca5d 6902 rc = pcim_enable_device(pdev);
553c4aa6
TH
6903 if (rc) {
6904 dev_printk(KERN_ERR, &pdev->dev,
6905 "failed to enable device after resume (%d)\n", rc);
6906 return rc;
6907 }
6908
9b847548 6909 pci_set_master(pdev);
553c4aa6 6910 return 0;
500530f6
TH
6911}
6912
3c5100c1 6913int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6914{
cca3974e 6915 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6916 int rc = 0;
6917
cca3974e 6918 rc = ata_host_suspend(host, mesg);
500530f6
TH
6919 if (rc)
6920 return rc;
6921
3c5100c1 6922 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6923
6924 return 0;
6925}
6926
6927int ata_pci_device_resume(struct pci_dev *pdev)
6928{
cca3974e 6929 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6930 int rc;
500530f6 6931
553c4aa6
TH
6932 rc = ata_pci_device_do_resume(pdev);
6933 if (rc == 0)
6934 ata_host_resume(host);
6935 return rc;
9b847548 6936}
6ffa01d8
TH
6937#endif /* CONFIG_PM */
6938
1da177e4
LT
6939#endif /* CONFIG_PCI */
6940
6941
1da177e4
LT
6942static int __init ata_init(void)
6943{
a8601e5f 6944 ata_probe_timeout *= HZ;
1da177e4
LT
6945 ata_wq = create_workqueue("ata");
6946 if (!ata_wq)
6947 return -ENOMEM;
6948
453b07ac
TH
6949 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6950 if (!ata_aux_wq) {
6951 destroy_workqueue(ata_wq);
6952 return -ENOMEM;
6953 }
6954
1da177e4
LT
6955 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6956 return 0;
6957}
6958
6959static void __exit ata_exit(void)
6960{
6961 destroy_workqueue(ata_wq);
453b07ac 6962 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6963}
6964
a4625085 6965subsys_initcall(ata_init);
1da177e4
LT
6966module_exit(ata_exit);
6967
67846b30 6968static unsigned long ratelimit_time;
34af946a 6969static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6970
6971int ata_ratelimit(void)
6972{
6973 int rc;
6974 unsigned long flags;
6975
6976 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6977
6978 if (time_after(jiffies, ratelimit_time)) {
6979 rc = 1;
6980 ratelimit_time = jiffies + (HZ/5);
6981 } else
6982 rc = 0;
6983
6984 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6985
6986 return rc;
6987}
6988
c22daff4
TH
6989/**
6990 * ata_wait_register - wait until register value changes
6991 * @reg: IO-mapped register
6992 * @mask: Mask to apply to read register value
6993 * @val: Wait condition
6994 * @interval_msec: polling interval in milliseconds
6995 * @timeout_msec: timeout in milliseconds
6996 *
6997 * Waiting for some bits of register to change is a common
6998 * operation for ATA controllers. This function reads 32bit LE
6999 * IO-mapped register @reg and tests for the following condition.
7000 *
7001 * (*@reg & mask) != val
7002 *
7003 * If the condition is met, it returns; otherwise, the process is
7004 * repeated after @interval_msec until timeout.
7005 *
7006 * LOCKING:
7007 * Kernel thread context (may sleep)
7008 *
7009 * RETURNS:
7010 * The final register value.
7011 */
7012u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7013 unsigned long interval_msec,
7014 unsigned long timeout_msec)
7015{
7016 unsigned long timeout;
7017 u32 tmp;
7018
7019 tmp = ioread32(reg);
7020
7021 /* Calculate timeout _after_ the first read to make sure
7022 * preceding writes reach the controller before starting to
7023 * eat away the timeout.
7024 */
7025 timeout = jiffies + (timeout_msec * HZ) / 1000;
7026
7027 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7028 msleep(interval_msec);
7029 tmp = ioread32(reg);
7030 }
7031
7032 return tmp;
7033}
7034
/*
 * Dummy port_ops: inert callbacks for ports that exist in the host
 * but must never touch real hardware.
 */
/* void callback on an ata_port: do nothing */
static void ata_dummy_noret(struct ata_port *ap) { }
/* int callback on an ata_port: always succeed */
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
/* void callback on a queued command: do nothing */
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* Always report the device ready (DRDY set, BSY/ERR clear). */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* Refuse every command issued to a dummy port with a system error. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7051
/* port_ops wired entirely from the dummy callbacks above: status
 * always reads DRDY, command issue always fails, and every other
 * hook is a no-op — safe to attach to a port with no usable device.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

/* Minimal port_info carrying only the dummy ops; used by drivers to
 * populate unused/hidden port slots.
 */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
7070
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* SATA debounce timing tables and dummy port plumbing */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);

/* host allocation / registration lifecycle */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);

/* command setup / completion */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);

/* taskfile / FIS handling and low-level register access */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);

/* BMDMA helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/* probing, link management and reset */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);

/* misc utilities */
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);

/* SCSI midlayer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);

/* SCR register access and link state */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */

/* IDENTIFY data parsing */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* transfer-mode timing computation */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

/* error handling */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

/* cable type detection helpers */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);