]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/ata/libata-core.c
[libata] ata_piix: add HP compaq laptop to short cable list
[mirror_ubuntu-focal-kernel.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
9f45cbd3 71static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
3373efd8 72static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 73static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 74
f3187195 75unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
baf4fdfa
ML
88int atapi_passthru16 = 1;
89module_param(atapi_passthru16, int, 0444);
90MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
91
c3c013a2
JG
92int libata_fua = 0;
93module_param_named(fua, libata_fua, int, 0444);
94MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
95
1e999736
AC
96static int ata_ignore_hpa = 0;
97module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
98MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
99
a8601e5f
AM
100static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
101module_param(ata_probe_timeout, int, 0444);
102MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
103
d7d0dad6
JG
104int libata_noacpi = 1;
105module_param_named(noacpi, libata_noacpi, int, 0444);
11ef697b
KCA
106MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
107
1da177e4
LT
108MODULE_AUTHOR("Jeff Garzik");
109MODULE_DESCRIPTION("Library module for ATA devices");
110MODULE_LICENSE("GPL");
111MODULE_VERSION(DRV_VERSION);
112
0baab86b 113
1da177e4
LT
114/**
115 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
116 * @tf: Taskfile to convert
1da177e4 117 * @pmp: Port multiplier port
9977126c
TH
118 * @is_cmd: This FIS is for command
119 * @fis: Buffer into which data will output
1da177e4
LT
120 *
121 * Converts a standard ATA taskfile to a Serial ATA
122 * FIS structure (Register - Host to Device).
123 *
124 * LOCKING:
125 * Inherited from caller.
126 */
9977126c 127void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 128{
9977126c
TH
129 fis[0] = 0x27; /* Register - Host to Device FIS */
130 fis[1] = pmp & 0xf; /* Port multiplier number*/
131 if (is_cmd)
132 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
133
1da177e4
LT
134 fis[2] = tf->command;
135 fis[3] = tf->feature;
136
137 fis[4] = tf->lbal;
138 fis[5] = tf->lbam;
139 fis[6] = tf->lbah;
140 fis[7] = tf->device;
141
142 fis[8] = tf->hob_lbal;
143 fis[9] = tf->hob_lbam;
144 fis[10] = tf->hob_lbah;
145 fis[11] = tf->hob_feature;
146
147 fis[12] = tf->nsect;
148 fis[13] = tf->hob_nsect;
149 fis[14] = 0;
150 fis[15] = tf->ctl;
151
152 fis[16] = 0;
153 fis[17] = 0;
154 fis[18] = 0;
155 fis[19] = 0;
156}
157
158/**
159 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
160 * @fis: Buffer from which data will be input
161 * @tf: Taskfile to output
162 *
e12a1be6 163 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
164 *
165 * LOCKING:
166 * Inherited from caller.
167 */
168
057ace5e 169void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
170{
171 tf->command = fis[2]; /* status */
172 tf->feature = fis[3]; /* error */
173
174 tf->lbal = fis[4];
175 tf->lbam = fis[5];
176 tf->lbah = fis[6];
177 tf->device = fis[7];
178
179 tf->hob_lbal = fis[8];
180 tf->hob_lbam = fis[9];
181 tf->hob_lbah = fis[10];
182
183 tf->nsect = fis[12];
184 tf->hob_nsect = fis[13];
185}
186
8cbd6df1
AL
187static const u8 ata_rw_cmds[] = {
188 /* pio multi */
189 ATA_CMD_READ_MULTI,
190 ATA_CMD_WRITE_MULTI,
191 ATA_CMD_READ_MULTI_EXT,
192 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
193 0,
194 0,
195 0,
196 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
197 /* pio */
198 ATA_CMD_PIO_READ,
199 ATA_CMD_PIO_WRITE,
200 ATA_CMD_PIO_READ_EXT,
201 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
202 0,
203 0,
204 0,
205 0,
8cbd6df1
AL
206 /* dma */
207 ATA_CMD_READ,
208 ATA_CMD_WRITE,
209 ATA_CMD_READ_EXT,
9a3dccc4
TH
210 ATA_CMD_WRITE_EXT,
211 0,
212 0,
213 0,
214 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 215};
1da177e4
LT
216
217/**
8cbd6df1 218 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
219 * @tf: command to examine and configure
220 * @dev: device tf belongs to
1da177e4 221 *
2e9edbf8 222 * Examine the device configuration and tf->flags to calculate
8cbd6df1 223 * the proper read/write commands and protocol to use.
1da177e4
LT
224 *
225 * LOCKING:
226 * caller.
227 */
bd056d7e 228static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 229{
9a3dccc4 230 u8 cmd;
1da177e4 231
9a3dccc4 232 int index, fua, lba48, write;
2e9edbf8 233
9a3dccc4 234 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
235 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
236 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 237
8cbd6df1
AL
238 if (dev->flags & ATA_DFLAG_PIO) {
239 tf->protocol = ATA_PROT_PIO;
9a3dccc4 240 index = dev->multi_count ? 0 : 8;
9af5c9c9 241 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
242 /* Unable to use DMA due to host limitation */
243 tf->protocol = ATA_PROT_PIO;
0565c26d 244 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
245 } else {
246 tf->protocol = ATA_PROT_DMA;
9a3dccc4 247 index = 16;
8cbd6df1 248 }
1da177e4 249
9a3dccc4
TH
250 cmd = ata_rw_cmds[index + fua + lba48 + write];
251 if (cmd) {
252 tf->command = cmd;
253 return 0;
254 }
255 return -1;
1da177e4
LT
256}
257
35b649fe
TH
258/**
259 * ata_tf_read_block - Read block address from ATA taskfile
260 * @tf: ATA taskfile of interest
261 * @dev: ATA device @tf belongs to
262 *
263 * LOCKING:
264 * None.
265 *
266 * Read block address from @tf. This function can handle all
267 * three address formats - LBA, LBA48 and CHS. tf->protocol and
268 * flags select the address format to use.
269 *
270 * RETURNS:
271 * Block address read from @tf.
272 */
273u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
274{
275 u64 block = 0;
276
277 if (tf->flags & ATA_TFLAG_LBA) {
278 if (tf->flags & ATA_TFLAG_LBA48) {
279 block |= (u64)tf->hob_lbah << 40;
280 block |= (u64)tf->hob_lbam << 32;
281 block |= tf->hob_lbal << 24;
282 } else
283 block |= (tf->device & 0xf) << 24;
284
285 block |= tf->lbah << 16;
286 block |= tf->lbam << 8;
287 block |= tf->lbal;
288 } else {
289 u32 cyl, head, sect;
290
291 cyl = tf->lbam | (tf->lbah << 8);
292 head = tf->device & 0xf;
293 sect = tf->lbal;
294
295 block = (cyl * dev->heads + head) * dev->sectors + sect;
296 }
297
298 return block;
299}
300
bd056d7e
TH
301/**
302 * ata_build_rw_tf - Build ATA taskfile for given read/write request
303 * @tf: Target ATA taskfile
304 * @dev: ATA device @tf belongs to
305 * @block: Block address
306 * @n_block: Number of blocks
307 * @tf_flags: RW/FUA etc...
308 * @tag: tag
309 *
310 * LOCKING:
311 * None.
312 *
313 * Build ATA taskfile @tf for read/write request described by
314 * @block, @n_block, @tf_flags and @tag on @dev.
315 *
316 * RETURNS:
317 *
318 * 0 on success, -ERANGE if the request is too large for @dev,
319 * -EINVAL if the request is invalid.
320 */
321int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
322 u64 block, u32 n_block, unsigned int tf_flags,
323 unsigned int tag)
324{
325 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
326 tf->flags |= tf_flags;
327
6d1245bf 328 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
329 /* yay, NCQ */
330 if (!lba_48_ok(block, n_block))
331 return -ERANGE;
332
333 tf->protocol = ATA_PROT_NCQ;
334 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
335
336 if (tf->flags & ATA_TFLAG_WRITE)
337 tf->command = ATA_CMD_FPDMA_WRITE;
338 else
339 tf->command = ATA_CMD_FPDMA_READ;
340
341 tf->nsect = tag << 3;
342 tf->hob_feature = (n_block >> 8) & 0xff;
343 tf->feature = n_block & 0xff;
344
345 tf->hob_lbah = (block >> 40) & 0xff;
346 tf->hob_lbam = (block >> 32) & 0xff;
347 tf->hob_lbal = (block >> 24) & 0xff;
348 tf->lbah = (block >> 16) & 0xff;
349 tf->lbam = (block >> 8) & 0xff;
350 tf->lbal = block & 0xff;
351
352 tf->device = 1 << 6;
353 if (tf->flags & ATA_TFLAG_FUA)
354 tf->device |= 1 << 7;
355 } else if (dev->flags & ATA_DFLAG_LBA) {
356 tf->flags |= ATA_TFLAG_LBA;
357
358 if (lba_28_ok(block, n_block)) {
359 /* use LBA28 */
360 tf->device |= (block >> 24) & 0xf;
361 } else if (lba_48_ok(block, n_block)) {
362 if (!(dev->flags & ATA_DFLAG_LBA48))
363 return -ERANGE;
364
365 /* use LBA48 */
366 tf->flags |= ATA_TFLAG_LBA48;
367
368 tf->hob_nsect = (n_block >> 8) & 0xff;
369
370 tf->hob_lbah = (block >> 40) & 0xff;
371 tf->hob_lbam = (block >> 32) & 0xff;
372 tf->hob_lbal = (block >> 24) & 0xff;
373 } else
374 /* request too large even for LBA48 */
375 return -ERANGE;
376
377 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
378 return -EINVAL;
379
380 tf->nsect = n_block & 0xff;
381
382 tf->lbah = (block >> 16) & 0xff;
383 tf->lbam = (block >> 8) & 0xff;
384 tf->lbal = block & 0xff;
385
386 tf->device |= ATA_LBA;
387 } else {
388 /* CHS */
389 u32 sect, head, cyl, track;
390
391 /* The request -may- be too large for CHS addressing. */
392 if (!lba_28_ok(block, n_block))
393 return -ERANGE;
394
395 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
396 return -EINVAL;
397
398 /* Convert LBA to CHS */
399 track = (u32)block / dev->sectors;
400 cyl = track / dev->heads;
401 head = track % dev->heads;
402 sect = (u32)block % dev->sectors + 1;
403
404 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
405 (u32)block, track, cyl, head, sect);
406
407 /* Check whether the converted CHS can fit.
408 Cylinder: 0-65535
409 Head: 0-15
410 Sector: 1-255*/
411 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
412 return -ERANGE;
413
414 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
415 tf->lbal = sect;
416 tf->lbam = cyl;
417 tf->lbah = cyl >> 8;
418 tf->device |= head;
419 }
420
421 return 0;
422}
423
cb95d562
TH
424/**
425 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
426 * @pio_mask: pio_mask
427 * @mwdma_mask: mwdma_mask
428 * @udma_mask: udma_mask
429 *
430 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
431 * unsigned int xfer_mask.
432 *
433 * LOCKING:
434 * None.
435 *
436 * RETURNS:
437 * Packed xfer_mask.
438 */
439static unsigned int ata_pack_xfermask(unsigned int pio_mask,
440 unsigned int mwdma_mask,
441 unsigned int udma_mask)
442{
443 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
444 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
445 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
446}
447
c0489e4e
TH
448/**
449 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
450 * @xfer_mask: xfer_mask to unpack
451 * @pio_mask: resulting pio_mask
452 * @mwdma_mask: resulting mwdma_mask
453 * @udma_mask: resulting udma_mask
454 *
455 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
456 * Any NULL distination masks will be ignored.
457 */
458static void ata_unpack_xfermask(unsigned int xfer_mask,
459 unsigned int *pio_mask,
460 unsigned int *mwdma_mask,
461 unsigned int *udma_mask)
462{
463 if (pio_mask)
464 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
465 if (mwdma_mask)
466 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
467 if (udma_mask)
468 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
469}
470
cb95d562 471static const struct ata_xfer_ent {
be9a50c8 472 int shift, bits;
cb95d562
TH
473 u8 base;
474} ata_xfer_tbl[] = {
475 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
476 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
477 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
478 { -1, },
479};
480
481/**
482 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
483 * @xfer_mask: xfer_mask of interest
484 *
485 * Return matching XFER_* value for @xfer_mask. Only the highest
486 * bit of @xfer_mask is considered.
487 *
488 * LOCKING:
489 * None.
490 *
491 * RETURNS:
492 * Matching XFER_* value, 0 if no match found.
493 */
494static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
495{
496 int highbit = fls(xfer_mask) - 1;
497 const struct ata_xfer_ent *ent;
498
499 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
500 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
501 return ent->base + highbit - ent->shift;
502 return 0;
503}
504
505/**
506 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
507 * @xfer_mode: XFER_* of interest
508 *
509 * Return matching xfer_mask for @xfer_mode.
510 *
511 * LOCKING:
512 * None.
513 *
514 * RETURNS:
515 * Matching xfer_mask, 0 if no match found.
516 */
517static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
518{
519 const struct ata_xfer_ent *ent;
520
521 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
522 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
523 return 1 << (ent->shift + xfer_mode - ent->base);
524 return 0;
525}
526
527/**
528 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
529 * @xfer_mode: XFER_* of interest
530 *
531 * Return matching xfer_shift for @xfer_mode.
532 *
533 * LOCKING:
534 * None.
535 *
536 * RETURNS:
537 * Matching xfer_shift, -1 if no match found.
538 */
539static int ata_xfer_mode2shift(unsigned int xfer_mode)
540{
541 const struct ata_xfer_ent *ent;
542
543 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
544 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
545 return ent->shift;
546 return -1;
547}
548
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* one entry per bit of the packed xfer mask, lowest bit first */
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];

	return "<n/a>";
}
594
4c360c81
TH
/* Map a SATA link speed number (1-based) to a human-readable string. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",	/* gen 1 */
		"3.0 Gbps",	/* gen 2 */
	};
	const unsigned int nr_spds = sizeof(spd_str) / sizeof(spd_str[0]);

	if (spd == 0 || spd - 1 >= nr_spds)
		return "<unknown>";

	return spd_str[spd - 1];
}
606
3373efd8 607void ata_dev_disable(struct ata_device *dev)
0b8efb0a 608{
09d7f9b0 609 if (ata_dev_enabled(dev)) {
9af5c9c9 610 if (ata_msg_drv(dev->link->ap))
09d7f9b0 611 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
612 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
613 ATA_DNXFER_QUIET);
0b8efb0a
TH
614 dev->class++;
615 }
616}
617
1da177e4 618/**
0d5ff566 619 * ata_devchk - PATA device presence detection
1da177e4
LT
620 * @ap: ATA channel to examine
621 * @device: Device to examine (starting at zero)
622 *
623 * This technique was originally described in
624 * Hale Landis's ATADRVR (www.ata-atapi.com), and
625 * later found its way into the ATA/ATAPI spec.
626 *
627 * Write a pattern to the ATA shadow registers,
628 * and if a device is present, it will respond by
629 * correctly storing and echoing back the
630 * ATA shadow register contents.
631 *
632 * LOCKING:
633 * caller.
634 */
635
0d5ff566 636static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
637{
638 struct ata_ioports *ioaddr = &ap->ioaddr;
639 u8 nsect, lbal;
640
641 ap->ops->dev_select(ap, device);
642
0d5ff566
TH
643 iowrite8(0x55, ioaddr->nsect_addr);
644 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 645
0d5ff566
TH
646 iowrite8(0xaa, ioaddr->nsect_addr);
647 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 648
0d5ff566
TH
649 iowrite8(0x55, ioaddr->nsect_addr);
650 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 651
0d5ff566
TH
652 nsect = ioread8(ioaddr->nsect_addr);
653 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
654
655 if ((nsect == 0x55) && (lbal == 0xaa))
656 return 1; /* we found a device */
657
658 return 0; /* nothing found */
659}
660
1da177e4
LT
661/**
662 * ata_dev_classify - determine device type based on ATA-spec signature
663 * @tf: ATA taskfile register set for device to be identified
664 *
665 * Determine from taskfile register contents whether a device is
666 * ATA or ATAPI, as per "Signature and persistence" section
667 * of ATA/PI spec (volume 1, sect 5.14).
668 *
669 * LOCKING:
670 * None.
671 *
672 * RETURNS:
673 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
674 * the event of failure.
675 */
676
057ace5e 677unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
678{
679 /* Apple's open source Darwin code hints that some devices only
680 * put a proper signature into the LBA mid/high registers,
681 * So, we only check those. It's sufficient for uniqueness.
682 */
683
684 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
685 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
686 DPRINTK("found ATA device by sig\n");
687 return ATA_DEV_ATA;
688 }
689
690 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
691 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
692 DPRINTK("found ATAPI device by sig\n");
693 return ATA_DEV_ATAPI;
694 }
695
696 DPRINTK("unknown device\n");
697 return ATA_DEV_UNKNOWN;
698}
699
700/**
701 * ata_dev_try_classify - Parse returned ATA device signature
3f19859e
TH
702 * @dev: ATA device to classify (starting at zero)
703 * @present: device seems present
b4dc7623 704 * @r_err: Value of error register on completion
1da177e4
LT
705 *
706 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
707 * an ATA/ATAPI-defined set of values is placed in the ATA
708 * shadow registers, indicating the results of device detection
709 * and diagnostics.
710 *
711 * Select the ATA device, and read the values from the ATA shadow
712 * registers. Then parse according to the Error register value,
713 * and the spec-defined values examined by ata_dev_classify().
714 *
715 * LOCKING:
716 * caller.
b4dc7623
TH
717 *
718 * RETURNS:
719 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4 720 */
3f19859e
TH
721unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
722 u8 *r_err)
1da177e4 723{
3f19859e 724 struct ata_port *ap = dev->link->ap;
1da177e4
LT
725 struct ata_taskfile tf;
726 unsigned int class;
727 u8 err;
728
3f19859e 729 ap->ops->dev_select(ap, dev->devno);
1da177e4
LT
730
731 memset(&tf, 0, sizeof(tf));
732
1da177e4 733 ap->ops->tf_read(ap, &tf);
0169e284 734 err = tf.feature;
b4dc7623
TH
735 if (r_err)
736 *r_err = err;
1da177e4 737
93590859 738 /* see if device passed diags: if master then continue and warn later */
3f19859e 739 if (err == 0 && dev->devno == 0)
93590859 740 /* diagnostic fail : do nothing _YET_ */
3f19859e 741 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 742 else if (err == 1)
1da177e4 743 /* do nothing */ ;
3f19859e 744 else if ((dev->devno == 0) && (err == 0x81))
1da177e4
LT
745 /* do nothing */ ;
746 else
b4dc7623 747 return ATA_DEV_NONE;
1da177e4 748
b4dc7623 749 /* determine if device is ATA or ATAPI */
1da177e4 750 class = ata_dev_classify(&tf);
b4dc7623 751
d7fbee05
TH
752 if (class == ATA_DEV_UNKNOWN) {
753 /* If the device failed diagnostic, it's likely to
754 * have reported incorrect device signature too.
755 * Assume ATA device if the device seems present but
756 * device signature is invalid with diagnostic
757 * failure.
758 */
759 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
760 class = ATA_DEV_ATA;
761 else
762 class = ATA_DEV_NONE;
763 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
764 class = ATA_DEV_NONE;
765
b4dc7623 766 return class;
1da177e4
LT
767}
768
769/**
6a62a04d 770 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
771 * @id: IDENTIFY DEVICE results we will examine
772 * @s: string into which data is output
773 * @ofs: offset into identify device page
774 * @len: length of string to return. must be an even number.
775 *
776 * The strings in the IDENTIFY DEVICE page are broken up into
777 * 16-bit chunks. Run through the string, and output each
778 * 8-bit chunk linearly, regardless of platform.
779 *
780 * LOCKING:
781 * caller.
782 */
783
6a62a04d
TH
784void ata_id_string(const u16 *id, unsigned char *s,
785 unsigned int ofs, unsigned int len)
1da177e4
LT
786{
787 unsigned int c;
788
789 while (len > 0) {
790 c = id[ofs] >> 8;
791 *s = c;
792 s++;
793
794 c = id[ofs] & 0xff;
795 *s = c;
796 s++;
797
798 ofs++;
799 len -= 2;
800 }
801}
802
0e949ff3 803/**
6a62a04d 804 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
805 * @id: IDENTIFY DEVICE results we will examine
806 * @s: string into which data is output
807 * @ofs: offset into identify device page
808 * @len: length of string to return. must be an odd number.
809 *
6a62a04d 810 * This function is identical to ata_id_string except that it
0e949ff3
TH
811 * trims trailing spaces and terminates the resulting string with
812 * null. @len must be actual maximum length (even number) + 1.
813 *
814 * LOCKING:
815 * caller.
816 */
6a62a04d
TH
817void ata_id_c_string(const u16 *id, unsigned char *s,
818 unsigned int ofs, unsigned int len)
0e949ff3
TH
819{
820 unsigned char *p;
821
822 WARN_ON(!(len & 1));
823
6a62a04d 824 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
825
826 p = s + strnlen(s, len - 1);
827 while (p > s && p[-1] == ' ')
828 p--;
829 *p = '\0';
830}
0baab86b 831
db6f8759
TH
832static u64 ata_id_n_sectors(const u16 *id)
833{
834 if (ata_id_has_lba(id)) {
835 if (ata_id_has_lba48(id))
836 return ata_id_u64(id, 100);
837 else
838 return ata_id_u32(id, 60);
839 } else {
840 if (ata_id_current_chs_valid(id))
841 return ata_id_u32(id, 57);
842 else
843 return id[1] * id[3] * id[6];
844 }
845}
846
1e999736
AC
847static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
848{
849 u64 sectors = 0;
850
851 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
852 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
853 sectors |= (tf->hob_lbal & 0xff) << 24;
854 sectors |= (tf->lbah & 0xff) << 16;
855 sectors |= (tf->lbam & 0xff) << 8;
856 sectors |= (tf->lbal & 0xff);
857
858 return ++sectors;
859}
860
861static u64 ata_tf_to_lba(struct ata_taskfile *tf)
862{
863 u64 sectors = 0;
864
865 sectors |= (tf->device & 0x0f) << 24;
866 sectors |= (tf->lbah & 0xff) << 16;
867 sectors |= (tf->lbam & 0xff) << 8;
868 sectors |= (tf->lbal & 0xff);
869
870 return ++sectors;
871}
872
873/**
c728a914
TH
874 * ata_read_native_max_address - Read native max address
875 * @dev: target device
876 * @max_sectors: out parameter for the result native max address
1e999736 877 *
c728a914
TH
878 * Perform an LBA48 or LBA28 native size query upon the device in
879 * question.
1e999736 880 *
c728a914
TH
881 * RETURNS:
882 * 0 on success, -EACCES if command is aborted by the drive.
883 * -EIO on other errors.
1e999736 884 */
c728a914 885static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 886{
c728a914 887 unsigned int err_mask;
1e999736 888 struct ata_taskfile tf;
c728a914 889 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
890
891 ata_tf_init(dev, &tf);
892
c728a914 893 /* always clear all address registers */
1e999736 894 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 895
c728a914
TH
896 if (lba48) {
897 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
898 tf.flags |= ATA_TFLAG_LBA48;
899 } else
900 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 901
1e999736 902 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
903 tf.device |= ATA_LBA;
904
905 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
906 if (err_mask) {
907 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
908 "max address (err_mask=0x%x)\n", err_mask);
909 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
910 return -EACCES;
911 return -EIO;
912 }
1e999736 913
c728a914
TH
914 if (lba48)
915 *max_sectors = ata_tf_to_lba48(&tf);
916 else
917 *max_sectors = ata_tf_to_lba(&tf);
1e999736 918
c728a914 919 return 0;
1e999736
AC
920}
921
922/**
c728a914
TH
923 * ata_set_max_sectors - Set max sectors
924 * @dev: target device
6b38d1d1 925 * @new_sectors: new max sectors value to set for the device
1e999736 926 *
c728a914
TH
927 * Set max sectors of @dev to @new_sectors.
928 *
929 * RETURNS:
930 * 0 on success, -EACCES if command is aborted or denied (due to
931 * previous non-volatile SET_MAX) by the drive. -EIO on other
932 * errors.
1e999736 933 */
05027adc 934static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 935{
c728a914 936 unsigned int err_mask;
1e999736 937 struct ata_taskfile tf;
c728a914 938 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
939
940 new_sectors--;
941
942 ata_tf_init(dev, &tf);
943
1e999736 944 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
945
946 if (lba48) {
947 tf.command = ATA_CMD_SET_MAX_EXT;
948 tf.flags |= ATA_TFLAG_LBA48;
949
950 tf.hob_lbal = (new_sectors >> 24) & 0xff;
951 tf.hob_lbam = (new_sectors >> 32) & 0xff;
952 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 953 } else {
c728a914
TH
954 tf.command = ATA_CMD_SET_MAX;
955
1e582ba4
TH
956 tf.device |= (new_sectors >> 24) & 0xf;
957 }
958
1e999736 959 tf.protocol |= ATA_PROT_NODATA;
c728a914 960 tf.device |= ATA_LBA;
1e999736
AC
961
962 tf.lbal = (new_sectors >> 0) & 0xff;
963 tf.lbam = (new_sectors >> 8) & 0xff;
964 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 965
c728a914
TH
966 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
967 if (err_mask) {
968 ata_dev_printk(dev, KERN_WARNING, "failed to set "
969 "max address (err_mask=0x%x)\n", err_mask);
970 if (err_mask == AC_ERR_DEV &&
971 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
972 return -EACCES;
973 return -EIO;
974 }
975
c728a914 976 return 0;
1e999736
AC
977}
978
979/**
980 * ata_hpa_resize - Resize a device with an HPA set
981 * @dev: Device to resize
982 *
983 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
984 * it if required to the full size of the media. The caller must check
985 * the drive has the HPA feature set enabled.
05027adc
TH
986 *
987 * RETURNS:
988 * 0 on success, -errno on failure.
1e999736 989 */
05027adc 990static int ata_hpa_resize(struct ata_device *dev)
1e999736 991{
05027adc
TH
992 struct ata_eh_context *ehc = &dev->link->eh_context;
993 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
994 u64 sectors = ata_id_n_sectors(dev->id);
995 u64 native_sectors;
c728a914 996 int rc;
a617c09f 997
05027adc
TH
998 /* do we need to do it? */
999 if (dev->class != ATA_DEV_ATA ||
1000 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1001 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1002 return 0;
1e999736 1003
05027adc
TH
1004 /* read native max address */
1005 rc = ata_read_native_max_address(dev, &native_sectors);
1006 if (rc) {
1007 /* If HPA isn't going to be unlocked, skip HPA
1008 * resizing from the next try.
1009 */
1010 if (!ata_ignore_hpa) {
1011 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1012 "broken, will skip HPA handling\n");
1013 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1014
1015 /* we can continue if device aborted the command */
1016 if (rc == -EACCES)
1017 rc = 0;
1e999736 1018 }
37301a55 1019
05027adc
TH
1020 return rc;
1021 }
1022
1023 /* nothing to do? */
1024 if (native_sectors <= sectors || !ata_ignore_hpa) {
1025 if (!print_info || native_sectors == sectors)
1026 return 0;
1027
1028 if (native_sectors > sectors)
1029 ata_dev_printk(dev, KERN_INFO,
1030 "HPA detected: current %llu, native %llu\n",
1031 (unsigned long long)sectors,
1032 (unsigned long long)native_sectors);
1033 else if (native_sectors < sectors)
1034 ata_dev_printk(dev, KERN_WARNING,
1035 "native sectors (%llu) is smaller than "
1036 "sectors (%llu)\n",
1037 (unsigned long long)native_sectors,
1038 (unsigned long long)sectors);
1039 return 0;
1040 }
1041
1042 /* let's unlock HPA */
1043 rc = ata_set_max_sectors(dev, native_sectors);
1044 if (rc == -EACCES) {
1045 /* if device aborted the command, skip HPA resizing */
1046 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1047 "(%llu -> %llu), skipping HPA handling\n",
1048 (unsigned long long)sectors,
1049 (unsigned long long)native_sectors);
1050 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1051 return 0;
1052 } else if (rc)
1053 return rc;
1054
1055 /* re-read IDENTIFY data */
1056 rc = ata_dev_reread_id(dev, 0);
1057 if (rc) {
1058 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1059 "data after HPA resizing\n");
1060 return rc;
1061 }
1062
1063 if (print_info) {
1064 u64 new_sectors = ata_id_n_sectors(dev->id);
1065 ata_dev_printk(dev, KERN_INFO,
1066 "HPA unlocked: %llu -> %llu, native %llu\n",
1067 (unsigned long long)sectors,
1068 (unsigned long long)new_sectors,
1069 (unsigned long long)native_sectors);
1070 }
1071
1072 return 0;
1e999736
AC
1073}
1074
10305f0f
AC
1075/**
1076 * ata_id_to_dma_mode - Identify DMA mode from id block
1077 * @dev: device to identify
cc261267 1078 * @unknown: mode to assume if we cannot tell
10305f0f
AC
1079 *
1080 * Set up the timing values for the device based upon the identify
1081 * reported values for the DMA mode. This function is used by drivers
1082 * which rely upon firmware configured modes, but wish to report the
1083 * mode correctly when possible.
1084 *
1085 * In addition we emit similarly formatted messages to the default
1086 * ata_dev_set_mode handler, in order to provide consistency of
1087 * presentation.
1088 */
1089
1090void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1091{
1092 unsigned int mask;
1093 u8 mode;
1094
1095 /* Pack the DMA modes */
1096 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1097 if (dev->id[53] & 0x04)
1098 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1099
1100 /* Select the mode in use */
1101 mode = ata_xfer_mask2mode(mask);
1102
1103 if (mode != 0) {
1104 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1105 ata_mode_string(mask));
1106 } else {
1107 /* SWDMA perhaps ? */
1108 mode = unknown;
1109 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1110 }
1111
1112 /* Configure the device reporting */
1113 dev->xfer_mode = mode;
1114 dev->xfer_shift = ata_xfer_mode2shift(mode);
1115}
1116
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Deliberately performs no action; a placeholder for controllers
 *	on which device selection is unnecessary.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
1132
0baab86b 1133
1da177e4
LT
1134/**
1135 * ata_std_dev_select - Select device 0/1 on ATA bus
1136 * @ap: ATA channel to manipulate
1137 * @device: ATA device (numbered from zero) to select
1138 *
1139 * Use the method defined in the ATA specification to
1140 * make either device 0, or device 1, active on the
0baab86b
EF
1141 * ATA channel. Works with both PIO and MMIO.
1142 *
1143 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1144 *
1145 * LOCKING:
1146 * caller.
1147 */
1148
1149void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1150{
1151 u8 tmp;
1152
1153 if (device == 0)
1154 tmp = ATA_DEVICE_OBS;
1155 else
1156 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1157
0d5ff566 1158 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1159 ata_pause(ap); /* needed; also flushes, for mmio */
1160}
1161
1162/**
1163 * ata_dev_select - Select device 0/1 on ATA bus
1164 * @ap: ATA channel to manipulate
1165 * @device: ATA device (numbered from zero) to select
1166 * @wait: non-zero to wait for Status register BSY bit to clear
1167 * @can_sleep: non-zero if context allows sleeping
1168 *
1169 * Use the method defined in the ATA specification to
1170 * make either device 0, or device 1, active on the
1171 * ATA channel.
1172 *
1173 * This is a high-level version of ata_std_dev_select(),
1174 * which additionally provides the services of inserting
1175 * the proper pauses and status polling, where needed.
1176 *
1177 * LOCKING:
1178 * caller.
1179 */
1180
1181void ata_dev_select(struct ata_port *ap, unsigned int device,
1182 unsigned int wait, unsigned int can_sleep)
1183{
88574551 1184 if (ata_msg_probe(ap))
44877b4e
TH
1185 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1186 "device %u, wait %u\n", device, wait);
1da177e4
LT
1187
1188 if (wait)
1189 ata_wait_idle(ap);
1190
1191 ap->ops->dev_select(ap, device);
1192
1193 if (wait) {
9af5c9c9 1194 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1195 msleep(150);
1196 ata_wait_idle(ap);
1197 }
1198}
1199
1200/**
1201 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1202 * @id: IDENTIFY DEVICE page to dump
1da177e4 1203 *
0bd3300a
TH
1204 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1205 * page.
1da177e4
LT
1206 *
1207 * LOCKING:
1208 * caller.
1209 */
1210
0bd3300a 1211static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1212{
1213 DPRINTK("49==0x%04x "
1214 "53==0x%04x "
1215 "63==0x%04x "
1216 "64==0x%04x "
1217 "75==0x%04x \n",
0bd3300a
TH
1218 id[49],
1219 id[53],
1220 id[63],
1221 id[64],
1222 id[75]);
1da177e4
LT
1223 DPRINTK("80==0x%04x "
1224 "81==0x%04x "
1225 "82==0x%04x "
1226 "83==0x%04x "
1227 "84==0x%04x \n",
0bd3300a
TH
1228 id[80],
1229 id[81],
1230 id[82],
1231 id[83],
1232 id[84]);
1da177e4
LT
1233 DPRINTK("88==0x%04x "
1234 "93==0x%04x\n",
0bd3300a
TH
1235 id[88],
1236 id[93]);
1da177e4
LT
1237}
1238
cb95d562
TH
1239/**
1240 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1241 * @id: IDENTIFY data to compute xfer mask from
1242 *
1243 * Compute the xfermask for this device. This is not as trivial
1244 * as it seems if we must consider early devices correctly.
1245 *
1246 * FIXME: pre IDE drive timing (do we care ?).
1247 *
1248 * LOCKING:
1249 * None.
1250 *
1251 * RETURNS:
1252 * Computed xfermask
1253 */
1254static unsigned int ata_id_xfermask(const u16 *id)
1255{
1256 unsigned int pio_mask, mwdma_mask, udma_mask;
1257
1258 /* Usual case. Word 53 indicates word 64 is valid */
1259 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1260 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1261 pio_mask <<= 3;
1262 pio_mask |= 0x7;
1263 } else {
1264 /* If word 64 isn't valid then Word 51 high byte holds
1265 * the PIO timing number for the maximum. Turn it into
1266 * a mask.
1267 */
7a0f1c8a 1268 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
1269 if (mode < 5) /* Valid PIO range */
1270 pio_mask = (2 << mode) - 1;
1271 else
1272 pio_mask = 1;
cb95d562
TH
1273
1274 /* But wait.. there's more. Design your standards by
1275 * committee and you too can get a free iordy field to
1276 * process. However its the speeds not the modes that
1277 * are supported... Note drivers using the timing API
1278 * will get this right anyway
1279 */
1280 }
1281
1282 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1283
b352e57d
AC
1284 if (ata_id_is_cfa(id)) {
1285 /*
1286 * Process compact flash extended modes
1287 */
1288 int pio = id[163] & 0x7;
1289 int dma = (id[163] >> 3) & 7;
1290
1291 if (pio)
1292 pio_mask |= (1 << 5);
1293 if (pio > 1)
1294 pio_mask |= (1 << 6);
1295 if (dma)
1296 mwdma_mask |= (1 << 3);
1297 if (dma > 1)
1298 mwdma_mask |= (1 << 4);
1299 }
1300
fb21f0d0
TH
1301 udma_mask = 0;
1302 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1303 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1304
1305 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1306}
1307
86e45b6b
TH
1308/**
1309 * ata_port_queue_task - Queue port_task
1310 * @ap: The ata_port to queue port_task for
e2a7f77a 1311 * @fn: workqueue function to be scheduled
65f27f38 1312 * @data: data for @fn to use
e2a7f77a 1313 * @delay: delay time for workqueue function
86e45b6b
TH
1314 *
1315 * Schedule @fn(@data) for execution after @delay jiffies using
1316 * port_task. There is one port_task per port and it's the
1317 * user(low level driver)'s responsibility to make sure that only
1318 * one task is active at any given time.
1319 *
1320 * libata core layer takes care of synchronization between
1321 * port_task and EH. ata_port_queue_task() may be ignored for EH
1322 * synchronization.
1323 *
1324 * LOCKING:
1325 * Inherited from caller.
1326 */
65f27f38 1327void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1328 unsigned long delay)
1329{
65f27f38
DH
1330 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1331 ap->port_task_data = data;
86e45b6b 1332
45a66c1c
ON
1333 /* may fail if ata_port_flush_task() in progress */
1334 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1335}
1336
1337/**
1338 * ata_port_flush_task - Flush port_task
1339 * @ap: The ata_port to flush port_task for
1340 *
1341 * After this function completes, port_task is guranteed not to
1342 * be running or scheduled.
1343 *
1344 * LOCKING:
1345 * Kernel thread context (may sleep)
1346 */
1347void ata_port_flush_task(struct ata_port *ap)
1348{
86e45b6b
TH
1349 DPRINTK("ENTER\n");
1350
45a66c1c 1351 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1352
0dd4b21f
BP
1353 if (ata_msg_ctl(ap))
1354 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1355}
1356
7102d230 1357static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1358{
77853bf2 1359 struct completion *waiting = qc->private_data;
a2a7a662 1360
a2a7a662 1361 complete(waiting);
a2a7a662
TH
1362}
1363
1364/**
2432697b 1365 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1366 * @dev: Device to which the command is sent
1367 * @tf: Taskfile registers for the command and the result
d69cf37d 1368 * @cdb: CDB for packet command
a2a7a662 1369 * @dma_dir: Data tranfer direction of the command
2432697b
TH
1370 * @sg: sg list for the data buffer of the command
1371 * @n_elem: Number of sg entries
a2a7a662
TH
1372 *
1373 * Executes libata internal command with timeout. @tf contains
1374 * command on entry and result on return. Timeout and error
1375 * conditions are reported via return value. No recovery action
1376 * is taken after a command times out. It's caller's duty to
1377 * clean up after timeout.
1378 *
1379 * LOCKING:
1380 * None. Should be called with kernel context, might sleep.
551e8889
TH
1381 *
1382 * RETURNS:
1383 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1384 */
2432697b
TH
1385unsigned ata_exec_internal_sg(struct ata_device *dev,
1386 struct ata_taskfile *tf, const u8 *cdb,
1387 int dma_dir, struct scatterlist *sg,
1388 unsigned int n_elem)
a2a7a662 1389{
9af5c9c9
TH
1390 struct ata_link *link = dev->link;
1391 struct ata_port *ap = link->ap;
a2a7a662
TH
1392 u8 command = tf->command;
1393 struct ata_queued_cmd *qc;
2ab7db1f 1394 unsigned int tag, preempted_tag;
dedaf2b0 1395 u32 preempted_sactive, preempted_qc_active;
da917d69 1396 int preempted_nr_active_links;
60be6b9a 1397 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1398 unsigned long flags;
77853bf2 1399 unsigned int err_mask;
d95a717f 1400 int rc;
a2a7a662 1401
ba6a1308 1402 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1403
e3180499 1404 /* no internal command while frozen */
b51e9e5d 1405 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1406 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1407 return AC_ERR_SYSTEM;
1408 }
1409
2ab7db1f 1410 /* initialize internal qc */
a2a7a662 1411
2ab7db1f
TH
1412 /* XXX: Tag 0 is used for drivers with legacy EH as some
1413 * drivers choke if any other tag is given. This breaks
1414 * ata_tag_internal() test for those drivers. Don't use new
1415 * EH stuff without converting to it.
1416 */
1417 if (ap->ops->error_handler)
1418 tag = ATA_TAG_INTERNAL;
1419 else
1420 tag = 0;
1421
6cec4a39 1422 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1423 BUG();
f69499f4 1424 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1425
1426 qc->tag = tag;
1427 qc->scsicmd = NULL;
1428 qc->ap = ap;
1429 qc->dev = dev;
1430 ata_qc_reinit(qc);
1431
9af5c9c9
TH
1432 preempted_tag = link->active_tag;
1433 preempted_sactive = link->sactive;
dedaf2b0 1434 preempted_qc_active = ap->qc_active;
da917d69 1435 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1436 link->active_tag = ATA_TAG_POISON;
1437 link->sactive = 0;
dedaf2b0 1438 ap->qc_active = 0;
da917d69 1439 ap->nr_active_links = 0;
2ab7db1f
TH
1440
1441 /* prepare & issue qc */
a2a7a662 1442 qc->tf = *tf;
d69cf37d
TH
1443 if (cdb)
1444 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1445 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1446 qc->dma_dir = dma_dir;
1447 if (dma_dir != DMA_NONE) {
2432697b
TH
1448 unsigned int i, buflen = 0;
1449
1450 for (i = 0; i < n_elem; i++)
1451 buflen += sg[i].length;
1452
1453 ata_sg_init(qc, sg, n_elem);
49c80429 1454 qc->nbytes = buflen;
a2a7a662
TH
1455 }
1456
77853bf2 1457 qc->private_data = &wait;
a2a7a662
TH
1458 qc->complete_fn = ata_qc_complete_internal;
1459
8e0e694a 1460 ata_qc_issue(qc);
a2a7a662 1461
ba6a1308 1462 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1463
a8601e5f 1464 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
d95a717f
TH
1465
1466 ata_port_flush_task(ap);
41ade50c 1467
d95a717f 1468 if (!rc) {
ba6a1308 1469 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1470
1471 /* We're racing with irq here. If we lose, the
1472 * following test prevents us from completing the qc
d95a717f
TH
1473 * twice. If we win, the port is frozen and will be
1474 * cleaned up by ->post_internal_cmd().
a2a7a662 1475 */
77853bf2 1476 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1477 qc->err_mask |= AC_ERR_TIMEOUT;
1478
1479 if (ap->ops->error_handler)
1480 ata_port_freeze(ap);
1481 else
1482 ata_qc_complete(qc);
f15a1daf 1483
0dd4b21f
BP
1484 if (ata_msg_warn(ap))
1485 ata_dev_printk(dev, KERN_WARNING,
88574551 1486 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1487 }
1488
ba6a1308 1489 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1490 }
1491
d95a717f
TH
1492 /* do post_internal_cmd */
1493 if (ap->ops->post_internal_cmd)
1494 ap->ops->post_internal_cmd(qc);
1495
a51d644a
TH
1496 /* perform minimal error analysis */
1497 if (qc->flags & ATA_QCFLAG_FAILED) {
1498 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1499 qc->err_mask |= AC_ERR_DEV;
1500
1501 if (!qc->err_mask)
1502 qc->err_mask |= AC_ERR_OTHER;
1503
1504 if (qc->err_mask & ~AC_ERR_OTHER)
1505 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1506 }
1507
15869303 1508 /* finish up */
ba6a1308 1509 spin_lock_irqsave(ap->lock, flags);
15869303 1510
e61e0672 1511 *tf = qc->result_tf;
77853bf2
TH
1512 err_mask = qc->err_mask;
1513
1514 ata_qc_free(qc);
9af5c9c9
TH
1515 link->active_tag = preempted_tag;
1516 link->sactive = preempted_sactive;
dedaf2b0 1517 ap->qc_active = preempted_qc_active;
da917d69 1518 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1519
1f7dd3e9
TH
1520 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1521 * Until those drivers are fixed, we detect the condition
1522 * here, fail the command with AC_ERR_SYSTEM and reenable the
1523 * port.
1524 *
1525 * Note that this doesn't change any behavior as internal
1526 * command failure results in disabling the device in the
1527 * higher layer for LLDDs without new reset/EH callbacks.
1528 *
1529 * Kill the following code as soon as those drivers are fixed.
1530 */
198e0fed 1531 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1532 err_mask |= AC_ERR_SYSTEM;
1533 ata_port_probe(ap);
1534 }
1535
ba6a1308 1536 spin_unlock_irqrestore(ap->lock, flags);
15869303 1537
77853bf2 1538 return err_mask;
a2a7a662
TH
1539}
1540
2432697b 1541/**
33480a0e 1542 * ata_exec_internal - execute libata internal command
2432697b
TH
1543 * @dev: Device to which the command is sent
1544 * @tf: Taskfile registers for the command and the result
1545 * @cdb: CDB for packet command
1546 * @dma_dir: Data tranfer direction of the command
1547 * @buf: Data buffer of the command
1548 * @buflen: Length of data buffer
1549 *
1550 * Wrapper around ata_exec_internal_sg() which takes simple
1551 * buffer instead of sg list.
1552 *
1553 * LOCKING:
1554 * None. Should be called with kernel context, might sleep.
1555 *
1556 * RETURNS:
1557 * Zero on success, AC_ERR_* mask on failure
1558 */
1559unsigned ata_exec_internal(struct ata_device *dev,
1560 struct ata_taskfile *tf, const u8 *cdb,
1561 int dma_dir, void *buf, unsigned int buflen)
1562{
33480a0e
TH
1563 struct scatterlist *psg = NULL, sg;
1564 unsigned int n_elem = 0;
2432697b 1565
33480a0e
TH
1566 if (dma_dir != DMA_NONE) {
1567 WARN_ON(!buf);
1568 sg_init_one(&sg, buf, buflen);
1569 psg = &sg;
1570 n_elem++;
1571 }
2432697b 1572
33480a0e 1573 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1574}
1575
977e6b9f
TH
1576/**
1577 * ata_do_simple_cmd - execute simple internal command
1578 * @dev: Device to which the command is sent
1579 * @cmd: Opcode to execute
1580 *
1581 * Execute a 'simple' command, that only consists of the opcode
1582 * 'cmd' itself, without filling any other registers
1583 *
1584 * LOCKING:
1585 * Kernel thread context (may sleep).
1586 *
1587 * RETURNS:
1588 * Zero on success, AC_ERR_* mask on failure
e58eb583 1589 */
77b08fb5 1590unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1591{
1592 struct ata_taskfile tf;
e58eb583
TH
1593
1594 ata_tf_init(dev, &tf);
1595
1596 tf.command = cmd;
1597 tf.flags |= ATA_TFLAG_DEVICE;
1598 tf.protocol = ATA_PROT_NODATA;
1599
977e6b9f 1600 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1601}
1602
1bc4ccff
AC
1603/**
1604 * ata_pio_need_iordy - check if iordy needed
1605 * @adev: ATA device
1606 *
1607 * Check if the current speed of the device requires IORDY. Used
1608 * by various controllers for chip configuration.
1609 */
a617c09f 1610
1bc4ccff
AC
1611unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1612{
432729f0
AC
1613 /* Controller doesn't support IORDY. Probably a pointless check
1614 as the caller should know this */
9af5c9c9 1615 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1616 return 0;
432729f0
AC
1617 /* PIO3 and higher it is mandatory */
1618 if (adev->pio_mode > XFER_PIO_2)
1619 return 1;
1620 /* We turn it on when possible */
1621 if (ata_id_has_iordy(adev->id))
1bc4ccff 1622 return 1;
432729f0
AC
1623 return 0;
1624}
2e9edbf8 1625
432729f0
AC
1626/**
1627 * ata_pio_mask_no_iordy - Return the non IORDY mask
1628 * @adev: ATA device
1629 *
1630 * Compute the highest mode possible if we are not using iordy. Return
1631 * -1 if no iordy mode is available.
1632 */
a617c09f 1633
432729f0
AC
1634static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1635{
1bc4ccff 1636 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1637 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1638 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1639 /* Is the speed faster than the drive allows non IORDY ? */
1640 if (pio) {
1641 /* This is cycle times not frequency - watch the logic! */
1642 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1643 return 3 << ATA_SHIFT_PIO;
1644 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1645 }
1646 }
432729f0 1647 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1648}
1649
1da177e4 1650/**
49016aca 1651 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1652 * @dev: target device
1653 * @p_class: pointer to class of the target device (may be changed)
bff04647 1654 * @flags: ATA_READID_* flags
fe635c7e 1655 * @id: buffer to read IDENTIFY data into
1da177e4 1656 *
49016aca
TH
1657 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1658 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1659 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1660 * for pre-ATA4 drives.
1da177e4 1661 *
50a99018
AC
1662 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1663 * now we abort if we hit that case.
1664 *
1da177e4 1665 * LOCKING:
49016aca
TH
1666 * Kernel thread context (may sleep)
1667 *
1668 * RETURNS:
1669 * 0 on success, -errno otherwise.
1da177e4 1670 */
a9beec95 1671int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1672 unsigned int flags, u16 *id)
1da177e4 1673{
9af5c9c9 1674 struct ata_port *ap = dev->link->ap;
49016aca 1675 unsigned int class = *p_class;
a0123703 1676 struct ata_taskfile tf;
49016aca
TH
1677 unsigned int err_mask = 0;
1678 const char *reason;
54936f8b 1679 int may_fallback = 1, tried_spinup = 0;
49016aca 1680 int rc;
1da177e4 1681
0dd4b21f 1682 if (ata_msg_ctl(ap))
44877b4e 1683 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1684
49016aca 1685 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 1686 retry:
3373efd8 1687 ata_tf_init(dev, &tf);
a0123703 1688
49016aca
TH
1689 switch (class) {
1690 case ATA_DEV_ATA:
a0123703 1691 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1692 break;
1693 case ATA_DEV_ATAPI:
a0123703 1694 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1695 break;
1696 default:
1697 rc = -ENODEV;
1698 reason = "unsupported class";
1699 goto err_out;
1da177e4
LT
1700 }
1701
a0123703 1702 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1703
1704 /* Some devices choke if TF registers contain garbage. Make
1705 * sure those are properly initialized.
1706 */
1707 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1708
1709 /* Device presence detection is unreliable on some
1710 * controllers. Always poll IDENTIFY if available.
1711 */
1712 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1713
3373efd8 1714 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
49016aca 1715 id, sizeof(id[0]) * ATA_ID_WORDS);
a0123703 1716 if (err_mask) {
800b3996 1717 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 1718 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 1719 ap->print_id, dev->devno);
55a8e2c8
TH
1720 return -ENOENT;
1721 }
1722
54936f8b
TH
1723 /* Device or controller might have reported the wrong
1724 * device class. Give a shot at the other IDENTIFY if
1725 * the current one is aborted by the device.
1726 */
1727 if (may_fallback &&
1728 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1729 may_fallback = 0;
1730
1731 if (class == ATA_DEV_ATA)
1732 class = ATA_DEV_ATAPI;
1733 else
1734 class = ATA_DEV_ATA;
1735 goto retry;
1736 }
1737
49016aca
TH
1738 rc = -EIO;
1739 reason = "I/O error";
1da177e4
LT
1740 goto err_out;
1741 }
1742
54936f8b
TH
1743 /* Falling back doesn't make sense if ID data was read
1744 * successfully at least once.
1745 */
1746 may_fallback = 0;
1747
49016aca 1748 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1749
49016aca 1750 /* sanity check */
a4f5749b 1751 rc = -EINVAL;
6070068b 1752 reason = "device reports invalid type";
a4f5749b
TH
1753
1754 if (class == ATA_DEV_ATA) {
1755 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1756 goto err_out;
1757 } else {
1758 if (ata_id_is_ata(id))
1759 goto err_out;
49016aca
TH
1760 }
1761
169439c2
ML
1762 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1763 tried_spinup = 1;
1764 /*
1765 * Drive powered-up in standby mode, and requires a specific
1766 * SET_FEATURES spin-up subcommand before it will accept
1767 * anything other than the original IDENTIFY command.
1768 */
1769 ata_tf_init(dev, &tf);
1770 tf.command = ATA_CMD_SET_FEATURES;
1771 tf.feature = SETFEATURES_SPINUP;
1772 tf.protocol = ATA_PROT_NODATA;
1773 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1774 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
fb0582f9 1775 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1776 rc = -EIO;
1777 reason = "SPINUP failed";
1778 goto err_out;
1779 }
1780 /*
1781 * If the drive initially returned incomplete IDENTIFY info,
1782 * we now must reissue the IDENTIFY command.
1783 */
1784 if (id[2] == 0x37c8)
1785 goto retry;
1786 }
1787
bff04647 1788 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1789 /*
1790 * The exact sequence expected by certain pre-ATA4 drives is:
1791 * SRST RESET
50a99018
AC
1792 * IDENTIFY (optional in early ATA)
1793 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
1794 * anything else..
1795 * Some drives were very specific about that exact sequence.
50a99018
AC
1796 *
1797 * Note that ATA4 says lba is mandatory so the second check
1798 * shoud never trigger.
49016aca
TH
1799 */
1800 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 1801 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
1802 if (err_mask) {
1803 rc = -EIO;
1804 reason = "INIT_DEV_PARAMS failed";
1805 goto err_out;
1806 }
1807
1808 /* current CHS translation info (id[53-58]) might be
1809 * changed. reread the identify device info.
1810 */
bff04647 1811 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
1812 goto retry;
1813 }
1814 }
1815
1816 *p_class = class;
fe635c7e 1817
49016aca
TH
1818 return 0;
1819
1820 err_out:
88574551 1821 if (ata_msg_warn(ap))
0dd4b21f 1822 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 1823 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
1824 return rc;
1825}
1826
3373efd8 1827static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1828{
9af5c9c9
TH
1829 struct ata_port *ap = dev->link->ap;
1830 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1831}
1832
a6e6ce8e
TH
1833static void ata_dev_config_ncq(struct ata_device *dev,
1834 char *desc, size_t desc_sz)
1835{
9af5c9c9 1836 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1837 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1838
1839 if (!ata_id_has_ncq(dev->id)) {
1840 desc[0] = '\0';
1841 return;
1842 }
75683fe7 1843 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1844 snprintf(desc, desc_sz, "NCQ (not used)");
1845 return;
1846 }
a6e6ce8e 1847 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1848 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1849 dev->flags |= ATA_DFLAG_NCQ;
1850 }
1851
1852 if (hdepth >= ddepth)
1853 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1854 else
1855 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1856}
1857
49016aca 1858/**
ffeae418 1859 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1860 * @dev: Target device to configure
1861 *
1862 * Configure @dev according to @dev->id. Generic and low-level
1863 * driver specific fixups are also applied.
49016aca
TH
1864 *
1865 * LOCKING:
ffeae418
TH
1866 * Kernel thread context (may sleep)
1867 *
1868 * RETURNS:
1869 * 0 on success, -errno otherwise
49016aca 1870 */
efdaedc4 1871int ata_dev_configure(struct ata_device *dev)
49016aca 1872{
9af5c9c9
TH
1873 struct ata_port *ap = dev->link->ap;
1874 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 1875 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1876 const u16 *id = dev->id;
ff8854b2 1877 unsigned int xfer_mask;
b352e57d 1878 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1879 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1880 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1881 int rc;
49016aca 1882
0dd4b21f 1883 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1884 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1885 __FUNCTION__);
ffeae418 1886 return 0;
49016aca
TH
1887 }
1888
0dd4b21f 1889 if (ata_msg_probe(ap))
44877b4e 1890 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1891
75683fe7
TH
1892 /* set horkage */
1893 dev->horkage |= ata_dev_blacklisted(dev);
1894
6746544c
TH
1895 /* let ACPI work its magic */
1896 rc = ata_acpi_on_devcfg(dev);
1897 if (rc)
1898 return rc;
08573a86 1899
05027adc
TH
1900 /* massage HPA, do it early as it might change IDENTIFY data */
1901 rc = ata_hpa_resize(dev);
1902 if (rc)
1903 return rc;
1904
c39f5ebe 1905 /* print device capabilities */
0dd4b21f 1906 if (ata_msg_probe(ap))
88574551
TH
1907 ata_dev_printk(dev, KERN_DEBUG,
1908 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1909 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1910 __FUNCTION__,
f15a1daf
TH
1911 id[49], id[82], id[83], id[84],
1912 id[85], id[86], id[87], id[88]);
c39f5ebe 1913
208a9933 1914 /* initialize to-be-configured parameters */
ea1dd4e1 1915 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1916 dev->max_sectors = 0;
1917 dev->cdb_len = 0;
1918 dev->n_sectors = 0;
1919 dev->cylinders = 0;
1920 dev->heads = 0;
1921 dev->sectors = 0;
1922
1da177e4
LT
1923 /*
1924 * common ATA, ATAPI feature tests
1925 */
1926
ff8854b2 1927 /* find max transfer mode; for printk only */
1148c3a7 1928 xfer_mask = ata_id_xfermask(id);
1da177e4 1929
0dd4b21f
BP
1930 if (ata_msg_probe(ap))
1931 ata_dump_id(id);
1da177e4 1932
ef143d57
AL
1933 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1934 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1935 sizeof(fwrevbuf));
1936
1937 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1938 sizeof(modelbuf));
1939
1da177e4
LT
1940 /* ATA-specific feature tests */
1941 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1942 if (ata_id_is_cfa(id)) {
1943 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1944 ata_dev_printk(dev, KERN_WARNING,
1945 "supports DRM functions and may "
1946 "not be fully accessable.\n");
b352e57d
AC
1947 snprintf(revbuf, 7, "CFA");
1948 }
1949 else
1950 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1951
1148c3a7 1952 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1953
3f64f565
EM
1954 if (dev->id[59] & 0x100)
1955 dev->multi_count = dev->id[59] & 0xff;
1956
1148c3a7 1957 if (ata_id_has_lba(id)) {
4c2d721a 1958 const char *lba_desc;
a6e6ce8e 1959 char ncq_desc[20];
8bf62ece 1960
4c2d721a
TH
1961 lba_desc = "LBA";
1962 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1963 if (ata_id_has_lba48(id)) {
8bf62ece 1964 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1965 lba_desc = "LBA48";
6fc49adb
TH
1966
1967 if (dev->n_sectors >= (1UL << 28) &&
1968 ata_id_has_flush_ext(id))
1969 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1970 }
8bf62ece 1971
a6e6ce8e
TH
1972 /* config NCQ */
1973 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1974
8bf62ece 1975 /* print device info to dmesg */
3f64f565
EM
1976 if (ata_msg_drv(ap) && print_info) {
1977 ata_dev_printk(dev, KERN_INFO,
1978 "%s: %s, %s, max %s\n",
1979 revbuf, modelbuf, fwrevbuf,
1980 ata_mode_string(xfer_mask));
1981 ata_dev_printk(dev, KERN_INFO,
1982 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 1983 (unsigned long long)dev->n_sectors,
3f64f565
EM
1984 dev->multi_count, lba_desc, ncq_desc);
1985 }
ffeae418 1986 } else {
8bf62ece
AL
1987 /* CHS */
1988
1989 /* Default translation */
1148c3a7
TH
1990 dev->cylinders = id[1];
1991 dev->heads = id[3];
1992 dev->sectors = id[6];
8bf62ece 1993
1148c3a7 1994 if (ata_id_current_chs_valid(id)) {
8bf62ece 1995 /* Current CHS translation is valid. */
1148c3a7
TH
1996 dev->cylinders = id[54];
1997 dev->heads = id[55];
1998 dev->sectors = id[56];
8bf62ece
AL
1999 }
2000
2001 /* print device info to dmesg */
3f64f565 2002 if (ata_msg_drv(ap) && print_info) {
88574551 2003 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2004 "%s: %s, %s, max %s\n",
2005 revbuf, modelbuf, fwrevbuf,
2006 ata_mode_string(xfer_mask));
a84471fe 2007 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2008 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2009 (unsigned long long)dev->n_sectors,
2010 dev->multi_count, dev->cylinders,
2011 dev->heads, dev->sectors);
2012 }
07f6f7d0
AL
2013 }
2014
6e7846e9 2015 dev->cdb_len = 16;
1da177e4
LT
2016 }
2017
2018 /* ATAPI-specific feature tests */
2c13b7ce 2019 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2020 const char *cdb_intr_string = "";
2021 const char *atapi_an_string = "";
7d77b247 2022 u32 sntf;
08a556db 2023
1148c3a7 2024 rc = atapi_cdb_len(id);
1da177e4 2025 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2026 if (ata_msg_warn(ap))
88574551
TH
2027 ata_dev_printk(dev, KERN_WARNING,
2028 "unsupported CDB len\n");
ffeae418 2029 rc = -EINVAL;
1da177e4
LT
2030 goto err_out_nosup;
2031 }
6e7846e9 2032 dev->cdb_len = (unsigned int) rc;
1da177e4 2033
7d77b247
TH
2034 /* Enable ATAPI AN if both the host and device have
2035 * the support. If PMP is attached, SNTF is required
2036 * to enable ATAPI AN to discern between PHY status
2037 * changed notifications and ATAPI ANs.
9f45cbd3 2038 */
7d77b247
TH
2039 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2040 (!ap->nr_pmp_links ||
2041 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2042 unsigned int err_mask;
2043
9f45cbd3 2044 /* issue SET feature command to turn this on */
854c73a2
TH
2045 err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2046 if (err_mask)
9f45cbd3 2047 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2048 "failed to enable ATAPI AN "
2049 "(err_mask=0x%x)\n", err_mask);
2050 else {
9f45cbd3 2051 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2052 atapi_an_string = ", ATAPI AN";
2053 }
9f45cbd3
KCA
2054 }
2055
08a556db 2056 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2057 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2058 cdb_intr_string = ", CDB intr";
2059 }
312f7da2 2060
1da177e4 2061 /* print device info to dmesg */
5afc8142 2062 if (ata_msg_drv(ap) && print_info)
ef143d57 2063 ata_dev_printk(dev, KERN_INFO,
854c73a2 2064 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2065 modelbuf, fwrevbuf,
12436c30 2066 ata_mode_string(xfer_mask),
854c73a2 2067 cdb_intr_string, atapi_an_string);
1da177e4
LT
2068 }
2069
914ed354
TH
2070 /* determine max_sectors */
2071 dev->max_sectors = ATA_MAX_SECTORS;
2072 if (dev->flags & ATA_DFLAG_LBA48)
2073 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2074
93590859
AC
2075 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2076 /* Let the user know. We don't want to disallow opens for
2077 rescue purposes, or in case the vendor is just a blithering
2078 idiot */
2079 if (print_info) {
2080 ata_dev_printk(dev, KERN_WARNING,
2081"Drive reports diagnostics failure. This may indicate a drive\n");
2082 ata_dev_printk(dev, KERN_WARNING,
2083"fault or invalid emulation. Contact drive vendor for information.\n");
2084 }
2085 }
2086
4b2f3ede 2087 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2088 if (ata_dev_knobble(dev)) {
5afc8142 2089 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2090 ata_dev_printk(dev, KERN_INFO,
2091 "applying bridge limits\n");
5a529139 2092 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2093 dev->max_sectors = ATA_MAX_SECTORS;
2094 }
2095
75683fe7 2096 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2097 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2098 dev->max_sectors);
18d6e9d5 2099
4b2f3ede 2100 if (ap->ops->dev_config)
cd0d3bbc 2101 ap->ops->dev_config(dev);
4b2f3ede 2102
0dd4b21f
BP
2103 if (ata_msg_probe(ap))
2104 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2105 __FUNCTION__, ata_chk_status(ap));
ffeae418 2106 return 0;
1da177e4
LT
2107
2108err_out_nosup:
0dd4b21f 2109 if (ata_msg_probe(ap))
88574551
TH
2110 ata_dev_printk(dev, KERN_DEBUG,
2111 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2112 return rc;
1da177e4
LT
2113}
2114
/**
 *	ata_cable_40wire - return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2127
/**
 *	ata_cable_80wire - return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2140
/**
 *	ata_cable_unknown - return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2152
/**
 *	ata_cable_sata - return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2164
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* each device gets a fixed budget of probe attempts */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* stash the classes reported by the reset, then mark every
	 * device UNKNOWN until identify confirms it */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	ata_link_for_each_dev(dev, &ap->link)
		dev->pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device survived probing */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* NOTE: @dev points at the device that failed above */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2300
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing DISABLED is all it takes to re-enable the port */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2316
/**
 *	sata_print_link_status - Print SATA link status
 *	@link: SATA link to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	/* silently give up if SStatus is unreadable (e.g. no SCR access) */
	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_link_online(link)) {
		/* SStatus bits 7:4 carry the current negotiated speed */
		tmp = (sstatus >> 4) & 0xf;
		ata_link_printk(link, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_printk(link, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
2345
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; DET==1 means
	 * device presence detected but PHY communication not established */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device itself to come out of BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2401
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	/* __sata_phy_reset() disables the port on failure; don't probe then */
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
2420
ebdfca6e
AC
2421/**
2422 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2423 * @adev: device
2424 *
2425 * Obtain the other device on the same cable, or if none is
2426 * present NULL is returned
2427 */
2e9edbf8 2428
3373efd8 2429struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2430{
9af5c9c9
TH
2431 struct ata_link *link = adev->link;
2432 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2433 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2434 return NULL;
2435 return pair;
2436}
2437
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* clear both device slots and flag the whole port disabled */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2457
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2516
936fd732 2517static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2518{
2519 u32 spd, limit;
2520
936fd732 2521 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2522 limit = 0;
2523 else
936fd732 2524 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2525
2526 spd = (*scontrol >> 4) & 0xf;
2527 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2528
2529 return spd != limit;
2530}
2531
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* if SControl can't be read, assume no reconfiguration needed */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(link, &scontrol);
}
2556
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* __sata_set_spd_needed() updates scontrol in place */
	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
2586
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

/* Each row: mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma.
 * Lookup (ata_timing_find_mode) scans linearly until the 0xFF sentinel,
 * so ordering within the table does not affect correctness.
 */
static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }	/* sentinel terminating the table */
};
2635
/* ENOUGH(): round v up to a whole number of 'unit's (ceil division,
 * valid for v >= 1).  EZ(): like ENOUGH() but maps 0 to 0 so that
 * "no requirement" stays "no requirement".
 */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

/* Convert nanosecond timings in @t to bus-clock counts in @q.
 * @T is the PIO/MWDMA cycle period and @UT the UDMA cycle period,
 * both in picoseconds (hence the *1000 on the nanosecond inputs).
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2650
2651void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2652 struct ata_timing *m, unsigned int what)
2653{
2654 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2655 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2656 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2657 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2658 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2659 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2660 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2661 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2662}
2663
2664static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2665{
2666 const struct ata_timing *t;
2667
2668 for (t = ata_timing; t->mode != speed; t++)
91190758 2669 if (t->mode == 0xFF)
452503f9 2670 return NULL;
2e9edbf8 2671 return t;
452503f9
AC
2672}
2673
/* Compute the bus-clock timing parameters for @adev at transfer mode
 * @speed into @t.  @T and @UT are the PIO/MWDMA and UDMA cycle periods
 * in picoseconds.  Returns 0 on success, -EINVAL for an unknown mode.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* word 67 covers PIO0-2 (no IORDY); word 68 with IORDY */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2744
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* the QUIET bit piggybacks on @sel; strip it before the switch */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest DMA mode, preferring to trim UDMA first */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* limit UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to leave the device with no PIO mode, or to "limit"
	 * to exactly what we already had */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
2833
/* Issue SET FEATURES - XFER MODE to @dev for its already-chosen
 * xfer_mode/xfer_shift, then revalidate the device.  Returns 0 on
 * success, -EIO if the device rejects the command, or the
 * revalidation's negative errno.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;
	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* re-read IDENTIFY data to confirm the mode change took effect */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2872
/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_link_for_each_dev(dev, link) {
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		/* pick the best PIO and the best DMA mode independently */
		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_link_for_each_dev(dev, link) {
		/* don't update suspended devices' xfer mode */
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
2967
04351821
AC
2968/**
2969 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2970 * @link: link on which timings will be programmed
04351821
AC
2971 * @r_failed_dev: out paramter for failed device
2972 *
2973 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2974 * ata_set_mode() fails, pointer to the failing device is
2975 * returned in @r_failed_dev.
2976 *
2977 * LOCKING:
2978 * PCI/etc. bus probe sem.
2979 *
2980 * RETURNS:
2981 * 0 on success, negative errno otherwise
2982 */
0260731f 2983int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 2984{
0260731f
TH
2985 struct ata_port *ap = link->ap;
2986
04351821
AC
2987 /* has private set_mode? */
2988 if (ap->ops->set_mode)
0260731f
TH
2989 return ap->ops->set_mode(link, r_failed_dev);
2990 return ata_do_set_mode(link, r_failed_dev);
04351821
AC
2991}
2992
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load registers first, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3012
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* 0xff status means the port is empty / device absent */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy after the "impatience" window: warn but keep waiting */
	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3067
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		/* 0xff while the link is offline means nothing attached */
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once after 5s of waiting, but only if at least
		 * 3s remain before the deadline
		 */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
3109
/* Post-reset follow-up: wait for the devices in @devmask (found
 * earlier by ata_devchk()) to come out of BSY.  -ENODEV from a
 * single device is remembered in @ret but is not fatal; any other
 * error aborts immediately.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3169
/* Issue an ATA software reset (SRST) by pulsing the SRST bit in the
 * device control register, then wait for the devices in @devmask to
 * become ready.  Returns 0 on success, -errno otherwise.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3205
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present; a SATA reset assumes
	 * device 0 without probing
	 */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset; a lone -ENODEV is tolerated */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3293
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* clamp deadline to the @params timeout, whichever is sooner */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the low nibble (DET) is compared */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3362
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* preserve the SPD field (bits 4-7), set the rest to 0x300 */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3398
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: upgrade to hardreset if the link needs
	 * a hardreset to come back up
	 */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3455
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* nothing to reset on an offline link */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3515
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3575
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3633
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-1-to-clear: write back what was read) */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3678
623a3128
TH
3679/**
3680 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3681 * @dev: device to compare against
3682 * @new_class: class of the new device
3683 * @new_id: IDENTIFY page of the new device
3684 *
3685 * Compare @new_class and @new_id against @dev and determine
3686 * whether @dev is the device indicated by @new_class and
3687 * @new_id.
3688 *
3689 * LOCKING:
3690 * None.
3691 *
3692 * RETURNS:
3693 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3694 */
3373efd8
TH
3695static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3696 const u16 *new_id)
623a3128
TH
3697{
3698 const u16 *old_id = dev->id;
a0cf733b
TH
3699 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3700 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3701
3702 if (dev->class != new_class) {
f15a1daf
TH
3703 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3704 dev->class, new_class);
623a3128
TH
3705 return 0;
3706 }
3707
a0cf733b
TH
3708 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3709 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3710 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3711 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3712
3713 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3714 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3715 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3716 return 0;
3717 }
3718
3719 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3720 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3721 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3722 return 0;
3723 }
3724
623a3128
TH
3725 return 1;
3726}
3727
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data into the port's scratch buffer */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	/* commit the fresh ID page only after the identity check passed */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
3760
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3825
/* One quirk-table entry: model number pattern, optional firmware
 * revision pattern (NULL matches any revision), and the
 * ATA_HORKAGE_* flags to apply.  Patterns are matched by
 * strn_pattern_cmp() and support a single trailing '*' wildcard.
 */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ "IOMEGA  ZIP 250       ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
	{ "IOMEGA  ZIP 250       ATAPI       Floppy",
				NULL,		ATA_HORKAGE_NODMA },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7250SASUN500G*", NULL,	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7225SBSUN250G*", NULL,	ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	{ "WDC WD3200AAJS-00RYA0", "12.01B01",	ATA_HORKAGE_NONCQ, },
	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
	{ "ST9120822AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST3160812AS",	"3.ADJ",	ATA_HORKAGE_NONCQ, },
	{ "ST980813AS",		"3.ADB",	ATA_HORKAGE_NONCQ, },
	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* End Marker */
	{ }
};
2e9edbf8 3911
/* Compare @name against pattern @patt.  A single trailing @wildchar
 * in @patt matches any suffix; otherwise the comparison covers
 * strlen(name) characters (so a pattern longer than @name compares
 * equal on @name's prefix — the blacklist table relies on exact
 * full-length model strings to avoid false hits).
 *
 * Returns 0 on match, non-zero otherwise (strncmp() convention).
 */
int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild = strchr(patt, wildchar);
	size_t cmp_len;

	/* trailing "*<NUL>" restricts comparison to the pattern prefix */
	if (wild != NULL && wild[1] == '\0')
		cmp_len = wild - patt;
	else
		cmp_len = strlen(name);

	return strncmp(patt, name, cmp_len);
}
3928
/* Look up @dev in ata_device_blacklist[]: returns the horkage flags
 * of the first entry whose model-number pattern (and, when the entry
 * has one, firmware-revision pattern) matches the device's IDENTIFY
 * strings, or 0 if the device is not listed.
 */
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	/* extract model and firmware strings from the IDENTIFY page */
	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			/* NULL model_rev means "any revision" */
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
3949
6919a0a6
AC
3950static int ata_dma_blacklisted(const struct ata_device *dev)
3951{
3952 /* We don't support polling DMA.
3953 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3954 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3955 */
9af5c9c9 3956 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
3957 (dev->flags & ATA_DFLAG_CDB_INTR))
3958 return 1;
75683fe7 3959 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
3960}
3961
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...  Each limit below only ever clears bits, so
 *	the order of the early restrictions does not matter; the cable
 *	check is deliberately last (see comment there).
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available: intersect what the drive structure and
	 * its IDENTIFY data each claim to support
	 */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* a simplex host can run DMA on one port at a time; back off to
	 * PIO if another port already holds the claim
	 */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	/* hosts without IORDY lose the PIO modes that require it */
	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to apply controller-specific limits */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if((ap->cbl == ATA_CBL_PATA40) ||
		   (ata_drive_40wire(dev->id) &&
		    (ap->cbl == ATA_CBL_PATA_UNK ||
		     ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	/* store the final result back into the per-device masks */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4042
1da177e4
LT
4043/**
4044 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4045 * @dev: Device to which command will be sent
4046 *
780a87f7
JG
4047 * Issue SET FEATURES - XFER MODE command to device @dev
4048 * on port @ap.
4049 *
1da177e4 4050 * LOCKING:
0cba632b 4051 * PCI/etc. bus probe sem.
83206a29
TH
4052 *
4053 * RETURNS:
4054 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4055 */
4056
3373efd8 4057static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4058{
a0123703 4059 struct ata_taskfile tf;
83206a29 4060 unsigned int err_mask;
1da177e4
LT
4061
4062 /* set up set-features taskfile */
4063 DPRINTK("set features - xfer mode\n");
4064
464cf177
TH
4065 /* Some controllers and ATAPI devices show flaky interrupt
4066 * behavior after setting xfer mode. Use polling instead.
4067 */
3373efd8 4068 ata_tf_init(dev, &tf);
a0123703
TH
4069 tf.command = ATA_CMD_SET_FEATURES;
4070 tf.feature = SETFEATURES_XFER;
464cf177 4071 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4072 tf.protocol = ATA_PROT_NODATA;
4073 tf.nsect = dev->xfer_mode;
1da177e4 4074
3373efd8 4075 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9f45cbd3
KCA
4076
4077 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4078 return err_mask;
4079}
4080
4081/**
4082 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4083 * @dev: Device to which command will be sent
4084 * @enable: Whether to enable or disable the feature
4085 *
4086 * Issue SET FEATURES - SATA FEATURES command to device @dev
4087 * on port @ap with sector count set to indicate Asynchronous
4088 * Notification feature
4089 *
4090 * LOCKING:
4091 * PCI/etc. bus probe sem.
4092 *
4093 * RETURNS:
4094 * 0 on success, AC_ERR_* mask otherwise.
4095 */
4096static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4097{
4098 struct ata_taskfile tf;
4099 unsigned int err_mask;
4100
4101 /* set up set-features taskfile */
4102 DPRINTK("set features - SATA features\n");
4103
4104 ata_tf_init(dev, &tf);
4105 tf.command = ATA_CMD_SET_FEATURES;
4106 tf.feature = enable;
4107 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4108 tf.protocol = ATA_PROT_NODATA;
4109 tf.nsect = SATA_AN;
4110
4111 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 4112
83206a29
TH
4113 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4114 return err_mask;
1da177e4
LT
4115}
4116
8bf62ece
AL
4117/**
4118 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4119 * @dev: Device to which command will be sent
e2a7f77a
RD
4120 * @heads: Number of heads (taskfile parameter)
4121 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4122 *
4123 * LOCKING:
6aff8f1f
TH
4124 * Kernel thread context (may sleep)
4125 *
4126 * RETURNS:
4127 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4128 */
3373efd8
TH
4129static unsigned int ata_dev_init_params(struct ata_device *dev,
4130 u16 heads, u16 sectors)
8bf62ece 4131{
a0123703 4132 struct ata_taskfile tf;
6aff8f1f 4133 unsigned int err_mask;
8bf62ece
AL
4134
4135 /* Number of sectors per track 1-255. Number of heads 1-16 */
4136 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4137 return AC_ERR_INVALID;
8bf62ece
AL
4138
4139 /* set up init dev params taskfile */
4140 DPRINTK("init dev params \n");
4141
3373efd8 4142 ata_tf_init(dev, &tf);
a0123703
TH
4143 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4144 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4145 tf.protocol = ATA_PROT_NODATA;
4146 tf.nsect = sectors;
4147 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4148
3373efd8 4149 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
18b2466c
AC
4150 /* A clean abort indicates an original or just out of spec drive
4151 and we should continue as we issue the setup based on the
4152 drive reported working geometry */
4153 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4154 err_mask = 0;
8bf62ece 4155
6aff8f1f
TH
4156 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4157 return err_mask;
8bf62ece
AL
4158}
4159
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command and,
 *	if the transfer was padded to a 32-bit boundary and data came
 *	from the device, copy the padded tail back into the caller's
 *	buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	/* only DMA-mapped commands with a valid sglist may reach here */
	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg: undo the trim done at map time */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			/* copy read pad bytes back through the pad sg entry */
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
4217
4218/**
4219 * ata_fill_sg - Fill PCI IDE PRD table
4220 * @qc: Metadata associated with taskfile to be transferred
4221 *
780a87f7
JG
4222 * Fill PCI IDE PRD (scatter-gather) table with segments
4223 * associated with the current disk command.
4224 *
1da177e4 4225 * LOCKING:
cca3974e 4226 * spin_lock_irqsave(host lock)
1da177e4
LT
4227 *
4228 */
4229static void ata_fill_sg(struct ata_queued_cmd *qc)
4230{
1da177e4 4231 struct ata_port *ap = qc->ap;
cedc9a47
JG
4232 struct scatterlist *sg;
4233 unsigned int idx;
1da177e4 4234
a4631474 4235 WARN_ON(qc->__sg == NULL);
f131883e 4236 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4237
4238 idx = 0;
cedc9a47 4239 ata_for_each_sg(sg, qc) {
1da177e4
LT
4240 u32 addr, offset;
4241 u32 sg_len, len;
4242
4243 /* determine if physical DMA addr spans 64K boundary.
4244 * Note h/w doesn't support 64-bit, so we unconditionally
4245 * truncate dma_addr_t to u32.
4246 */
4247 addr = (u32) sg_dma_address(sg);
4248 sg_len = sg_dma_len(sg);
4249
4250 while (sg_len) {
4251 offset = addr & 0xffff;
4252 len = sg_len;
4253 if ((offset + sg_len) > 0x10000)
4254 len = 0x10000 - offset;
4255
4256 ap->prd[idx].addr = cpu_to_le32(addr);
4257 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4258 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4259
4260 idx++;
4261 sg_len -= len;
4262 addr += len;
4263 }
4264 }
4265
4266 if (idx)
4267 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4268}
b9a4197e 4269
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			/* blen == 0 means len was a full 64K chunk */
			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says.
				   Emit the 64K chunk as two 32K PRD entries
				   instead; note the ++idx advances to the
				   second entry, finished below. */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4331
1da177e4
LT
4332/**
4333 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4334 * @qc: Metadata associated with taskfile to check
4335 *
780a87f7
JG
4336 * Allow low-level driver to filter ATA PACKET commands, returning
4337 * a status indicating whether or not it is OK to use DMA for the
4338 * supplied PACKET command.
4339 *
1da177e4 4340 * LOCKING:
cca3974e 4341 * spin_lock_irqsave(host lock)
0cba632b 4342 *
1da177e4
LT
4343 * RETURNS: 0 when ATAPI DMA can be used
4344 * nonzero otherwise
4345 */
4346int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4347{
4348 struct ata_port *ap = qc->ap;
b9a4197e
TH
4349
4350 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4351 * few ATAPI devices choke on such DMA requests.
4352 */
4353 if (unlikely(qc->nbytes & 15))
4354 return 1;
6f23a31d 4355
1da177e4 4356 if (ap->ops->check_atapi_dma)
b9a4197e 4357 return ap->ops->check_atapi_dma(qc);
1da177e4 4358
b9a4197e 4359 return 0;
1da177e4 4360}
b9a4197e 4361
31cc23b3
TH
4362/**
4363 * ata_std_qc_defer - Check whether a qc needs to be deferred
4364 * @qc: ATA command in question
4365 *
4366 * Non-NCQ commands cannot run with any other command, NCQ or
4367 * not. As upper layer only knows the queue depth, we are
4368 * responsible for maintaining exclusion. This function checks
4369 * whether a new command @qc can be issued.
4370 *
4371 * LOCKING:
4372 * spin_lock_irqsave(host lock)
4373 *
4374 * RETURNS:
4375 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4376 */
4377int ata_std_qc_defer(struct ata_queued_cmd *qc)
4378{
4379 struct ata_link *link = qc->dev->link;
4380
4381 if (qc->tf.protocol == ATA_PROT_NCQ) {
4382 if (!ata_tag_valid(link->active_tag))
4383 return 0;
4384 } else {
4385 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4386 return 0;
4387 }
4388
4389 return ATA_DEFER_LINK;
4390}
4391
1da177e4
LT
4392/**
4393 * ata_qc_prep - Prepare taskfile for submission
4394 * @qc: Metadata associated with taskfile to be prepared
4395 *
780a87f7
JG
4396 * Prepare ATA taskfile for submission.
4397 *
1da177e4 4398 * LOCKING:
cca3974e 4399 * spin_lock_irqsave(host lock)
1da177e4
LT
4400 */
4401void ata_qc_prep(struct ata_queued_cmd *qc)
4402{
4403 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4404 return;
4405
4406 ata_fill_sg(qc);
4407}
4408
d26fc955
AC
4409/**
4410 * ata_dumb_qc_prep - Prepare taskfile for submission
4411 * @qc: Metadata associated with taskfile to be prepared
4412 *
4413 * Prepare ATA taskfile for submission.
4414 *
4415 * LOCKING:
4416 * spin_lock_irqsave(host lock)
4417 */
4418void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4419{
4420 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4421 return;
4422
4423 ata_fill_sg_dumb(qc);
4424}
4425
/* no-op qc_prep for controllers that need no PRD/taskfile preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4427
0cba632b
JG
4428/**
4429 * ata_sg_init_one - Associate command with memory buffer
4430 * @qc: Command to be associated
4431 * @buf: Memory buffer
4432 * @buflen: Length of memory buffer, in bytes.
4433 *
4434 * Initialize the data-related elements of queued_cmd @qc
4435 * to point to a single memory buffer, @buf of byte length @buflen.
4436 *
4437 * LOCKING:
cca3974e 4438 * spin_lock_irqsave(host lock)
0cba632b
JG
4439 */
4440
1da177e4
LT
4441void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4442{
1da177e4
LT
4443 qc->flags |= ATA_QCFLAG_SINGLE;
4444
cedc9a47 4445 qc->__sg = &qc->sgent;
1da177e4 4446 qc->n_elem = 1;
cedc9a47 4447 qc->orig_n_elem = 1;
1da177e4 4448 qc->buf_virt = buf;
233277ca 4449 qc->nbytes = buflen;
1da177e4 4450
61c0596c 4451 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4452}
4453
0cba632b
JG
4454/**
4455 * ata_sg_init - Associate command with scatter-gather table.
4456 * @qc: Command to be associated
4457 * @sg: Scatter-gather table.
4458 * @n_elem: Number of elements in s/g table.
4459 *
4460 * Initialize the data-related elements of queued_cmd @qc
4461 * to point to a scatter-gather table @sg, containing @n_elem
4462 * elements.
4463 *
4464 * LOCKING:
cca3974e 4465 * spin_lock_irqsave(host lock)
0cba632b
JG
4466 */
4467
1da177e4
LT
4468void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4469 unsigned int n_elem)
4470{
4471 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4472 qc->__sg = sg;
1da177e4 4473 qc->n_elem = n_elem;
cedc9a47 4474 qc->orig_n_elem = n_elem;
1da177e4
LT
4475}
4476
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc,
 *	padding the transfer out to a 32-bit boundary via the port's
 *	per-tag pad buffer when needed.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI transfers are expected to need padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, stage the unaligned tail in the pad buffer */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* if trimming emptied the buffer, only the pad entry remains */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
4545
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc,
 *	padding the last segment out to a 32-bit boundary via the port's
 *	per-tag pad buffer when needed.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI transfers are expected to need padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for writes, stage the unaligned tail in the pad buffer */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* don't map a last segment that trimming emptied */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
4630
0baab86b 4631/**
c893a3ae 4632 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4633 * @buf: Buffer to swap
4634 * @buf_words: Number of 16-bit words in buffer.
4635 *
4636 * Swap halves of 16-bit words if needed to convert from
4637 * little-endian byte order to native cpu byte order, or
4638 * vice-versa.
4639 *
4640 * LOCKING:
6f0ef4fa 4641 * Inherited from caller.
0baab86b 4642 */
1da177e4
LT
4643void swap_buf_le16(u16 *buf, unsigned int buf_words)
4644{
4645#ifdef __BIG_ENDIAN
4646 unsigned int i;
4647
4648 for (i = 0; i < buf_words; i++)
4649 buf[i] = le16_to_cpu(buf[i]);
4650#endif /* __BIG_ENDIAN */
4651}
4652
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO,
 *	16 bits at a time.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any: bounce it through a 16-bit
	 * scratch word so the data register is always accessed as a
	 * full 16-bit quantity.
	 */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
4691
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled so the word stream to the
 *	device is not interleaved with interrupt-context accesses.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
4713
4714
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device,
 *	advancing the qc's scatterlist cursor, and switch the HSM to
 *	HSM_ST_LAST when this is the final sector.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the transfer?  then this DRQ block finishes it */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem pages are permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* advance to the next scatterlist entry when this one is consumed */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 4771
07f6f7d0 4772/**
5a5dbd18 4773 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4774 * @qc: Command on going
4775 *
5a5dbd18 4776 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4777 * ATA device for the DRQ request.
4778 *
4779 * LOCKING:
4780 * Inherited from caller.
4781 */
1da177e4 4782
07f6f7d0
AL
4783static void ata_pio_sectors(struct ata_queued_cmd *qc)
4784{
4785 if (is_multi_taskfile(&qc->tf)) {
4786 /* READ/WRITE MULTIPLE */
4787 unsigned int nsect;
4788
587005de 4789 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4790
5a5dbd18 4791 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4792 qc->dev->multi_count);
07f6f7d0
AL
4793 while (nsect--)
4794 ata_pio_sector(qc);
4795 } else
4796 ata_pio_sector(qc);
4cc980b3
AL
4797
4798 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
4799}
4800
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB, then move the
 *	HSM to the state matching the command's data protocol.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase; wait for command completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
4836
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the qc's
 *	scatterlist and draining/padding any excess the device asks
 *	for beyond the scatterlist's capacity.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this DRQ block completes the command? */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem pages are permanently mapped; no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* advance to the next scatterlist entry when this one is consumed */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	/* more bytes requested by the device in this DRQ block */
	if (bytes)
		goto next_sg;
}
4931
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the DRQ block's byte count and transfer direction from
 *	the device's taskfile registers, validate them, and transfer
 *	that many bytes from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count of this DRQ block, from the LBA mid/high registers */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	/* protocol violation by the device: fail the command via the HSM */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4982
4983/**
c234fb00
AL
4984 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4985 * @ap: the target ata_port
4986 * @qc: qc on going
1da177e4 4987 *
c234fb00
AL
4988 * RETURNS:
4989 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4990 */
c234fb00
AL
4991
4992static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4993{
c234fb00
AL
4994 if (qc->tf.flags & ATA_TFLAG_POLLING)
4995 return 1;
1da177e4 4996
c234fb00
AL
4997 if (ap->hsm_task_state == HSM_ST_FIRST) {
4998 if (qc->tf.protocol == ATA_PROT_PIO &&
4999 (qc->tf.flags & ATA_TFLAG_WRITE))
5000 return 1;
1da177e4 5001
c234fb00
AL
5002 if (is_atapi_taskfile(&qc->tf) &&
5003 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5004 return 1;
fe79e683
AL
5005 }
5006
c234fb00
AL
5007 return 0;
5008}
1da177e4 5009
c17ea20d
TH
5010/**
5011 * ata_hsm_qc_complete - finish a qc running on standard HSM
5012 * @qc: Command to complete
5013 * @in_wq: 1 if called from workqueue, 0 otherwise
5014 *
5015 * Finish @qc which is running on standard HSM.
5016 *
5017 * LOCKING:
cca3974e 5018 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
5019 * Otherwise, none on entry and grabs host lock.
5020 */
5021static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5022{
5023 struct ata_port *ap = qc->ap;
5024 unsigned long flags;
5025
5026 if (ap->ops->error_handler) {
5027 if (in_wq) {
ba6a1308 5028 spin_lock_irqsave(ap->lock, flags);
c17ea20d 5029
cca3974e
JG
5030 /* EH might have kicked in while host lock is
5031 * released.
c17ea20d
TH
5032 */
5033 qc = ata_qc_from_tag(ap, qc->tag);
5034 if (qc) {
5035 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 5036 ap->ops->irq_on(ap);
c17ea20d
TH
5037 ata_qc_complete(qc);
5038 } else
5039 ata_port_freeze(ap);
5040 }
5041
ba6a1308 5042 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
5043 } else {
5044 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5045 ata_qc_complete(qc);
5046 else
5047 ata_port_freeze(ap);
5048 }
5049 } else {
5050 if (in_wq) {
ba6a1308 5051 spin_lock_irqsave(ap->lock, flags);
83625006 5052 ap->ops->irq_on(ap);
c17ea20d 5053 ata_qc_complete(qc);
ba6a1308 5054 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
5055 } else
5056 ata_qc_complete(qc);
5057 }
5058}
5059
bb5cb290
AL
5060/**
5061 * ata_hsm_move - move the HSM to the next state.
5062 * @ap: the target ata_port
5063 * @qc: qc on going
5064 * @status: current device status
5065 * @in_wq: 1 if called from workqueue, 0 otherwise
5066 *
5067 * RETURNS:
5068 * 1 when poll next status needed, 0 otherwise.
5069 */
9a1004d0
TH
5070int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5071 u8 status, int in_wq)
e2cec771 5072{
bb5cb290
AL
5073 unsigned long flags = 0;
5074 int poll_next;
5075
6912ccd5
AL
5076 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5077
bb5cb290
AL
5078 /* Make sure ata_qc_issue_prot() does not throw things
5079 * like DMA polling into the workqueue. Notice that
5080 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5081 */
c234fb00 5082 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 5083
e2cec771 5084fsm_start:
999bb6f4 5085 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 5086 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 5087
e2cec771
AL
5088 switch (ap->hsm_task_state) {
5089 case HSM_ST_FIRST:
bb5cb290
AL
5090 /* Send first data block or PACKET CDB */
5091
5092 /* If polling, we will stay in the work queue after
5093 * sending the data. Otherwise, interrupt handler
5094 * takes over after sending the data.
5095 */
5096 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5097
e2cec771 5098 /* check device status */
3655d1d3
AL
5099 if (unlikely((status & ATA_DRQ) == 0)) {
5100 /* handle BSY=0, DRQ=0 as error */
5101 if (likely(status & (ATA_ERR | ATA_DF)))
5102 /* device stops HSM for abort/error */
5103 qc->err_mask |= AC_ERR_DEV;
5104 else
5105 /* HSM violation. Let EH handle this */
5106 qc->err_mask |= AC_ERR_HSM;
5107
14be71f4 5108 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 5109 goto fsm_start;
1da177e4
LT
5110 }
5111
71601958
AL
5112 /* Device should not ask for data transfer (DRQ=1)
5113 * when it finds something wrong.
eee6c32f
AL
5114 * We ignore DRQ here and stop the HSM by
5115 * changing hsm_task_state to HSM_ST_ERR and
5116 * let the EH abort the command or reset the device.
71601958
AL
5117 */
5118 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
5119 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
5120 "error, dev_stat 0x%X\n", status);
3655d1d3 5121 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
5122 ap->hsm_task_state = HSM_ST_ERR;
5123 goto fsm_start;
71601958 5124 }
1da177e4 5125
bb5cb290
AL
5126 /* Send the CDB (atapi) or the first data block (ata pio out).
5127 * During the state transition, interrupt handler shouldn't
5128 * be invoked before the data transfer is complete and
5129 * hsm_task_state is changed. Hence, the following locking.
5130 */
5131 if (in_wq)
ba6a1308 5132 spin_lock_irqsave(ap->lock, flags);
1da177e4 5133
bb5cb290
AL
5134 if (qc->tf.protocol == ATA_PROT_PIO) {
5135 /* PIO data out protocol.
5136 * send first data block.
5137 */
0565c26d 5138
bb5cb290
AL
5139 /* ata_pio_sectors() might change the state
5140 * to HSM_ST_LAST. so, the state is changed here
5141 * before ata_pio_sectors().
5142 */
5143 ap->hsm_task_state = HSM_ST;
5144 ata_pio_sectors(qc);
bb5cb290
AL
5145 } else
5146 /* send CDB */
5147 atapi_send_cdb(ap, qc);
5148
5149 if (in_wq)
ba6a1308 5150 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
5151
5152 /* if polling, ata_pio_task() handles the rest.
5153 * otherwise, interrupt handler takes over from here.
5154 */
e2cec771 5155 break;
1c848984 5156
e2cec771
AL
5157 case HSM_ST:
5158 /* complete command or read/write the data register */
5159 if (qc->tf.protocol == ATA_PROT_ATAPI) {
5160 /* ATAPI PIO protocol */
5161 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
5162 /* No more data to transfer or device error.
5163 * Device error will be tagged in HSM_ST_LAST.
5164 */
e2cec771
AL
5165 ap->hsm_task_state = HSM_ST_LAST;
5166 goto fsm_start;
5167 }
1da177e4 5168
71601958
AL
5169 /* Device should not ask for data transfer (DRQ=1)
5170 * when it finds something wrong.
eee6c32f
AL
5171 * We ignore DRQ here and stop the HSM by
5172 * changing hsm_task_state to HSM_ST_ERR and
5173 * let the EH abort the command or reset the device.
71601958
AL
5174 */
5175 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
5176 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5177 "device error, dev_stat 0x%X\n",
5178 status);
3655d1d3 5179 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
5180 ap->hsm_task_state = HSM_ST_ERR;
5181 goto fsm_start;
71601958 5182 }
1da177e4 5183
e2cec771 5184 atapi_pio_bytes(qc);
7fb6ec28 5185
e2cec771
AL
5186 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5187 /* bad ireason reported by device */
5188 goto fsm_start;
1da177e4 5189
e2cec771
AL
5190 } else {
5191 /* ATA PIO protocol */
5192 if (unlikely((status & ATA_DRQ) == 0)) {
5193 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
5194 if (likely(status & (ATA_ERR | ATA_DF)))
5195 /* device stops HSM for abort/error */
5196 qc->err_mask |= AC_ERR_DEV;
5197 else
55a8e2c8
TH
5198 /* HSM violation. Let EH handle this.
5199 * Phantom devices also trigger this
5200 * condition. Mark hint.
5201 */
5202 qc->err_mask |= AC_ERR_HSM |
5203 AC_ERR_NODEV_HINT;
3655d1d3 5204
e2cec771
AL
5205 ap->hsm_task_state = HSM_ST_ERR;
5206 goto fsm_start;
5207 }
1da177e4 5208
eee6c32f
AL
5209 /* For PIO reads, some devices may ask for
5210 * data transfer (DRQ=1) alone with ERR=1.
5211 * We respect DRQ here and transfer one
5212 * block of junk data before changing the
5213 * hsm_task_state to HSM_ST_ERR.
5214 *
5215 * For PIO writes, ERR=1 DRQ=1 doesn't make
5216 * sense since the data block has been
5217 * transferred to the device.
71601958
AL
5218 */
5219 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
5220 /* data might be corrputed */
5221 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
5222
5223 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5224 ata_pio_sectors(qc);
eee6c32f
AL
5225 status = ata_wait_idle(ap);
5226 }
5227
3655d1d3
AL
5228 if (status & (ATA_BUSY | ATA_DRQ))
5229 qc->err_mask |= AC_ERR_HSM;
5230
eee6c32f
AL
5231 /* ata_pio_sectors() might change the
5232 * state to HSM_ST_LAST. so, the state
5233 * is changed after ata_pio_sectors().
5234 */
5235 ap->hsm_task_state = HSM_ST_ERR;
5236 goto fsm_start;
71601958
AL
5237 }
5238
e2cec771
AL
5239 ata_pio_sectors(qc);
5240
5241 if (ap->hsm_task_state == HSM_ST_LAST &&
5242 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5243 /* all data read */
52a32205 5244 status = ata_wait_idle(ap);
e2cec771
AL
5245 goto fsm_start;
5246 }
5247 }
5248
bb5cb290 5249 poll_next = 1;
1da177e4
LT
5250 break;
5251
14be71f4 5252 case HSM_ST_LAST:
6912ccd5
AL
5253 if (unlikely(!ata_ok(status))) {
5254 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
5255 ap->hsm_task_state = HSM_ST_ERR;
5256 goto fsm_start;
5257 }
5258
5259 /* no more data to transfer */
4332a771 5260 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 5261 ap->print_id, qc->dev->devno, status);
e2cec771 5262
6912ccd5
AL
5263 WARN_ON(qc->err_mask);
5264
e2cec771 5265 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 5266
e2cec771 5267 /* complete taskfile transaction */
c17ea20d 5268 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5269
5270 poll_next = 0;
1da177e4
LT
5271 break;
5272
14be71f4 5273 case HSM_ST_ERR:
e2cec771
AL
5274 /* make sure qc->err_mask is available to
5275 * know what's wrong and recover
5276 */
5277 WARN_ON(qc->err_mask == 0);
5278
5279 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 5280
999bb6f4 5281 /* complete taskfile transaction */
c17ea20d 5282 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5283
5284 poll_next = 0;
e2cec771
AL
5285 break;
5286 default:
bb5cb290 5287 poll_next = 0;
6912ccd5 5288 BUG();
1da177e4
LT
5289 }
5290
bb5cb290 5291 return poll_next;
1da177e4
LT
5292}
5293
65f27f38 5294static void ata_pio_task(struct work_struct *work)
8061f5f0 5295{
65f27f38
DH
5296 struct ata_port *ap =
5297 container_of(work, struct ata_port, port_task.work);
5298 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5299 u8 status;
a1af3734 5300 int poll_next;
8061f5f0 5301
7fb6ec28 5302fsm_start:
a1af3734 5303 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5304
a1af3734
AL
5305 /*
5306 * This is purely heuristic. This is a fast path.
5307 * Sometimes when we enter, BSY will be cleared in
5308 * a chk-status or two. If not, the drive is probably seeking
5309 * or something. Snooze for a couple msecs, then
5310 * chk-status again. If still busy, queue delayed work.
5311 */
5312 status = ata_busy_wait(ap, ATA_BUSY, 5);
5313 if (status & ATA_BUSY) {
5314 msleep(2);
5315 status = ata_busy_wait(ap, ATA_BUSY, 10);
5316 if (status & ATA_BUSY) {
31ce6dae 5317 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5318 return;
5319 }
8061f5f0
TH
5320 }
5321
a1af3734
AL
5322 /* move the HSM */
5323 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5324
a1af3734
AL
5325 /* another command or interrupt handler
5326 * may be running at this point.
5327 */
5328 if (poll_next)
7fb6ec28 5329 goto fsm_start;
8061f5f0
TH
5330}
5331
1da177e4
LT
5332/**
5333 * ata_qc_new - Request an available ATA command, for queueing
5334 * @ap: Port associated with device @dev
5335 * @dev: Device from whom we request an available command structure
5336 *
5337 * LOCKING:
0cba632b 5338 * None.
1da177e4
LT
5339 */
5340
5341static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5342{
5343 struct ata_queued_cmd *qc = NULL;
5344 unsigned int i;
5345
e3180499 5346 /* no command while frozen */
b51e9e5d 5347 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5348 return NULL;
5349
2ab7db1f
TH
5350 /* the last tag is reserved for internal command. */
5351 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5352 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5353 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5354 break;
5355 }
5356
5357 if (qc)
5358 qc->tag = i;
5359
5360 return qc;
5361}
5362
5363/**
5364 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5365 * @dev: Device from whom we request an available command structure
5366 *
5367 * LOCKING:
0cba632b 5368 * None.
1da177e4
LT
5369 */
5370
3373efd8 5371struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5372{
9af5c9c9 5373 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5374 struct ata_queued_cmd *qc;
5375
5376 qc = ata_qc_new(ap);
5377 if (qc) {
1da177e4
LT
5378 qc->scsicmd = NULL;
5379 qc->ap = ap;
5380 qc->dev = dev;
1da177e4 5381
2c13b7ce 5382 ata_qc_reinit(qc);
1da177e4
LT
5383 }
5384
5385 return qc;
5386}
5387
1da177e4
LT
5388/**
5389 * ata_qc_free - free unused ata_queued_cmd
5390 * @qc: Command to complete
5391 *
5392 * Designed to free unused ata_queued_cmd object
5393 * in case something prevents using it.
5394 *
5395 * LOCKING:
cca3974e 5396 * spin_lock_irqsave(host lock)
1da177e4
LT
5397 */
5398void ata_qc_free(struct ata_queued_cmd *qc)
5399{
4ba946e9
TH
5400 struct ata_port *ap = qc->ap;
5401 unsigned int tag;
5402
a4631474 5403 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5404
4ba946e9
TH
5405 qc->flags = 0;
5406 tag = qc->tag;
5407 if (likely(ata_tag_valid(tag))) {
4ba946e9 5408 qc->tag = ATA_TAG_POISON;
6cec4a39 5409 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5410 }
1da177e4
LT
5411}
5412
76014427 5413void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5414{
dedaf2b0 5415 struct ata_port *ap = qc->ap;
9af5c9c9 5416 struct ata_link *link = qc->dev->link;
dedaf2b0 5417
a4631474
TH
5418 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5419 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5420
5421 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5422 ata_sg_clean(qc);
5423
7401abf2 5424 /* command should be marked inactive atomically with qc completion */
da917d69 5425 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 5426 link->sactive &= ~(1 << qc->tag);
da917d69
TH
5427 if (!link->sactive)
5428 ap->nr_active_links--;
5429 } else {
9af5c9c9 5430 link->active_tag = ATA_TAG_POISON;
da917d69
TH
5431 ap->nr_active_links--;
5432 }
5433
5434 /* clear exclusive status */
5435 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5436 ap->excl_link == link))
5437 ap->excl_link = NULL;
7401abf2 5438
3f3791d3
AL
5439 /* atapi: mark qc as inactive to prevent the interrupt handler
5440 * from completing the command twice later, before the error handler
5441 * is called. (when rc != 0 and atapi request sense is needed)
5442 */
5443 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5444 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5445
1da177e4 5446 /* call completion callback */
77853bf2 5447 qc->complete_fn(qc);
1da177e4
LT
5448}
5449
39599a53
TH
5450static void fill_result_tf(struct ata_queued_cmd *qc)
5451{
5452 struct ata_port *ap = qc->ap;
5453
39599a53 5454 qc->result_tf.flags = qc->tf.flags;
4742d54f 5455 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5456}
5457
f686bcb8
TH
5458/**
5459 * ata_qc_complete - Complete an active ATA command
5460 * @qc: Command to complete
5461 * @err_mask: ATA Status register contents
5462 *
5463 * Indicate to the mid and upper layers that an ATA
5464 * command has completed, with either an ok or not-ok status.
5465 *
5466 * LOCKING:
cca3974e 5467 * spin_lock_irqsave(host lock)
f686bcb8
TH
5468 */
5469void ata_qc_complete(struct ata_queued_cmd *qc)
5470{
5471 struct ata_port *ap = qc->ap;
5472
5473 /* XXX: New EH and old EH use different mechanisms to
5474 * synchronize EH with regular execution path.
5475 *
5476 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5477 * Normal execution path is responsible for not accessing a
5478 * failed qc. libata core enforces the rule by returning NULL
5479 * from ata_qc_from_tag() for failed qcs.
5480 *
5481 * Old EH depends on ata_qc_complete() nullifying completion
5482 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5483 * not synchronize with interrupt handler. Only PIO task is
5484 * taken care of.
5485 */
5486 if (ap->ops->error_handler) {
b51e9e5d 5487 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5488
5489 if (unlikely(qc->err_mask))
5490 qc->flags |= ATA_QCFLAG_FAILED;
5491
5492 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5493 if (!ata_tag_internal(qc->tag)) {
5494 /* always fill result TF for failed qc */
39599a53 5495 fill_result_tf(qc);
f686bcb8
TH
5496 ata_qc_schedule_eh(qc);
5497 return;
5498 }
5499 }
5500
5501 /* read result TF if requested */
5502 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5503 fill_result_tf(qc);
f686bcb8
TH
5504
5505 __ata_qc_complete(qc);
5506 } else {
5507 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5508 return;
5509
5510 /* read result TF if failed or requested */
5511 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5512 fill_result_tf(qc);
f686bcb8
TH
5513
5514 __ata_qc_complete(qc);
5515 }
5516}
5517
dedaf2b0
TH
5518/**
5519 * ata_qc_complete_multiple - Complete multiple qcs successfully
5520 * @ap: port in question
5521 * @qc_active: new qc_active mask
5522 * @finish_qc: LLDD callback invoked before completing a qc
5523 *
5524 * Complete in-flight commands. This functions is meant to be
5525 * called from low-level driver's interrupt routine to complete
5526 * requests normally. ap->qc_active and @qc_active is compared
5527 * and commands are completed accordingly.
5528 *
5529 * LOCKING:
cca3974e 5530 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5531 *
5532 * RETURNS:
5533 * Number of completed commands on success, -errno otherwise.
5534 */
5535int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5536 void (*finish_qc)(struct ata_queued_cmd *))
5537{
5538 int nr_done = 0;
5539 u32 done_mask;
5540 int i;
5541
5542 done_mask = ap->qc_active ^ qc_active;
5543
5544 if (unlikely(done_mask & qc_active)) {
5545 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5546 "(%08x->%08x)\n", ap->qc_active, qc_active);
5547 return -EINVAL;
5548 }
5549
5550 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5551 struct ata_queued_cmd *qc;
5552
5553 if (!(done_mask & (1 << i)))
5554 continue;
5555
5556 if ((qc = ata_qc_from_tag(ap, i))) {
5557 if (finish_qc)
5558 finish_qc(qc);
5559 ata_qc_complete(qc);
5560 nr_done++;
5561 }
5562 }
5563
5564 return nr_done;
5565}
5566
1da177e4
LT
5567static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5568{
5569 struct ata_port *ap = qc->ap;
5570
5571 switch (qc->tf.protocol) {
3dc1d881 5572 case ATA_PROT_NCQ:
1da177e4
LT
5573 case ATA_PROT_DMA:
5574 case ATA_PROT_ATAPI_DMA:
5575 return 1;
5576
5577 case ATA_PROT_ATAPI:
5578 case ATA_PROT_PIO:
1da177e4
LT
5579 if (ap->flags & ATA_FLAG_PIO_DMA)
5580 return 1;
5581
5582 /* fall through */
5583
5584 default:
5585 return 0;
5586 }
5587
5588 /* never reached */
5589}
5590
5591/**
5592 * ata_qc_issue - issue taskfile to device
5593 * @qc: command to issue to device
5594 *
5595 * Prepare an ATA command to submission to device.
5596 * This includes mapping the data into a DMA-able
5597 * area, filling in the S/G table, and finally
5598 * writing the taskfile to hardware, starting the command.
5599 *
5600 * LOCKING:
cca3974e 5601 * spin_lock_irqsave(host lock)
1da177e4 5602 */
8e0e694a 5603void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5604{
5605 struct ata_port *ap = qc->ap;
9af5c9c9 5606 struct ata_link *link = qc->dev->link;
1da177e4 5607
dedaf2b0
TH
5608 /* Make sure only one non-NCQ command is outstanding. The
5609 * check is skipped for old EH because it reuses active qc to
5610 * request ATAPI sense.
5611 */
9af5c9c9 5612 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0
TH
5613
5614 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 5615 WARN_ON(link->sactive & (1 << qc->tag));
da917d69
TH
5616
5617 if (!link->sactive)
5618 ap->nr_active_links++;
9af5c9c9 5619 link->sactive |= 1 << qc->tag;
dedaf2b0 5620 } else {
9af5c9c9 5621 WARN_ON(link->sactive);
da917d69
TH
5622
5623 ap->nr_active_links++;
9af5c9c9 5624 link->active_tag = qc->tag;
dedaf2b0
TH
5625 }
5626
e4a70e76 5627 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5628 ap->qc_active |= 1 << qc->tag;
e4a70e76 5629
1da177e4
LT
5630 if (ata_should_dma_map(qc)) {
5631 if (qc->flags & ATA_QCFLAG_SG) {
5632 if (ata_sg_setup(qc))
8e436af9 5633 goto sg_err;
1da177e4
LT
5634 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5635 if (ata_sg_setup_one(qc))
8e436af9 5636 goto sg_err;
1da177e4
LT
5637 }
5638 } else {
5639 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5640 }
5641
5642 ap->ops->qc_prep(qc);
5643
8e0e694a
TH
5644 qc->err_mask |= ap->ops->qc_issue(qc);
5645 if (unlikely(qc->err_mask))
5646 goto err;
5647 return;
1da177e4 5648
8e436af9
TH
5649sg_err:
5650 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
5651 qc->err_mask |= AC_ERR_SYSTEM;
5652err:
5653 ata_qc_complete(qc);
1da177e4
LT
5654}
5655
5656/**
5657 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5658 * @qc: command to issue to device
5659 *
5660 * Using various libata functions and hooks, this function
5661 * starts an ATA command. ATA commands are grouped into
5662 * classes called "protocols", and issuing each type of protocol
5663 * is slightly different.
5664 *
0baab86b
EF
5665 * May be used as the qc_issue() entry in ata_port_operations.
5666 *
1da177e4 5667 * LOCKING:
cca3974e 5668 * spin_lock_irqsave(host lock)
1da177e4
LT
5669 *
5670 * RETURNS:
9a3d9eb0 5671 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5672 */
5673
9a3d9eb0 5674unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5675{
5676 struct ata_port *ap = qc->ap;
5677
e50362ec
AL
5678 /* Use polling pio if the LLD doesn't handle
5679 * interrupt driven pio and atapi CDB interrupt.
5680 */
5681 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5682 switch (qc->tf.protocol) {
5683 case ATA_PROT_PIO:
e3472cbe 5684 case ATA_PROT_NODATA:
e50362ec
AL
5685 case ATA_PROT_ATAPI:
5686 case ATA_PROT_ATAPI_NODATA:
5687 qc->tf.flags |= ATA_TFLAG_POLLING;
5688 break;
5689 case ATA_PROT_ATAPI_DMA:
5690 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5691 /* see ata_dma_blacklisted() */
e50362ec
AL
5692 BUG();
5693 break;
5694 default:
5695 break;
5696 }
5697 }
5698
312f7da2 5699 /* select the device */
1da177e4
LT
5700 ata_dev_select(ap, qc->dev->devno, 1, 0);
5701
312f7da2 5702 /* start the command */
1da177e4
LT
5703 switch (qc->tf.protocol) {
5704 case ATA_PROT_NODATA:
312f7da2
AL
5705 if (qc->tf.flags & ATA_TFLAG_POLLING)
5706 ata_qc_set_polling(qc);
5707
e5338254 5708 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5709 ap->hsm_task_state = HSM_ST_LAST;
5710
5711 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5712 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5713
1da177e4
LT
5714 break;
5715
5716 case ATA_PROT_DMA:
587005de 5717 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5718
1da177e4
LT
5719 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5720 ap->ops->bmdma_setup(qc); /* set up bmdma */
5721 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5722 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5723 break;
5724
312f7da2
AL
5725 case ATA_PROT_PIO:
5726 if (qc->tf.flags & ATA_TFLAG_POLLING)
5727 ata_qc_set_polling(qc);
1da177e4 5728
e5338254 5729 ata_tf_to_host(ap, &qc->tf);
312f7da2 5730
54f00389
AL
5731 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5732 /* PIO data out protocol */
5733 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5734 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5735
5736 /* always send first data block using
e27486db 5737 * the ata_pio_task() codepath.
54f00389 5738 */
312f7da2 5739 } else {
54f00389
AL
5740 /* PIO data in protocol */
5741 ap->hsm_task_state = HSM_ST;
5742
5743 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5744 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5745
5746 /* if polling, ata_pio_task() handles the rest.
5747 * otherwise, interrupt handler takes over from here.
5748 */
312f7da2
AL
5749 }
5750
1da177e4
LT
5751 break;
5752
1da177e4 5753 case ATA_PROT_ATAPI:
1da177e4 5754 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5755 if (qc->tf.flags & ATA_TFLAG_POLLING)
5756 ata_qc_set_polling(qc);
5757
e5338254 5758 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5759
312f7da2
AL
5760 ap->hsm_task_state = HSM_ST_FIRST;
5761
5762 /* send cdb by polling if no cdb interrupt */
5763 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5764 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5765 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5766 break;
5767
5768 case ATA_PROT_ATAPI_DMA:
587005de 5769 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5770
1da177e4
LT
5771 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5772 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5773 ap->hsm_task_state = HSM_ST_FIRST;
5774
5775 /* send cdb by polling if no cdb interrupt */
5776 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5777 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5778 break;
5779
5780 default:
5781 WARN_ON(1);
9a3d9eb0 5782 return AC_ERR_SYSTEM;
1da177e4
LT
5783 }
5784
5785 return 0;
5786}
5787
1da177e4
LT
5788/**
5789 * ata_host_intr - Handle host interrupt for given (port, task)
5790 * @ap: Port on which interrupt arrived (possibly...)
5791 * @qc: Taskfile currently active in engine
5792 *
5793 * Handle host interrupt for given queued command. Currently,
5794 * only DMA interrupts are handled. All other commands are
5795 * handled via polling with interrupts disabled (nIEN bit).
5796 *
5797 * LOCKING:
cca3974e 5798 * spin_lock_irqsave(host lock)
1da177e4
LT
5799 *
5800 * RETURNS:
5801 * One if interrupt was handled, zero if not (shared irq).
5802 */
5803
5804inline unsigned int ata_host_intr (struct ata_port *ap,
5805 struct ata_queued_cmd *qc)
5806{
9af5c9c9 5807 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 5808 u8 status, host_stat = 0;
1da177e4 5809
312f7da2 5810 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5811 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5812
312f7da2
AL
5813 /* Check whether we are expecting interrupt in this state */
5814 switch (ap->hsm_task_state) {
5815 case HSM_ST_FIRST:
6912ccd5
AL
5816 /* Some pre-ATAPI-4 devices assert INTRQ
5817 * at this state when ready to receive CDB.
5818 */
1da177e4 5819
312f7da2
AL
5820 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5821 * The flag was turned on only for atapi devices.
5822 * No need to check is_atapi_taskfile(&qc->tf) again.
5823 */
5824 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5825 goto idle_irq;
1da177e4 5826 break;
312f7da2
AL
5827 case HSM_ST_LAST:
5828 if (qc->tf.protocol == ATA_PROT_DMA ||
5829 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5830 /* check status of DMA engine */
5831 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5832 VPRINTK("ata%u: host_stat 0x%X\n",
5833 ap->print_id, host_stat);
312f7da2
AL
5834
5835 /* if it's not our irq... */
5836 if (!(host_stat & ATA_DMA_INTR))
5837 goto idle_irq;
5838
5839 /* before we do anything else, clear DMA-Start bit */
5840 ap->ops->bmdma_stop(qc);
a4f16610
AL
5841
5842 if (unlikely(host_stat & ATA_DMA_ERR)) {
5843 /* error when transfering data to/from memory */
5844 qc->err_mask |= AC_ERR_HOST_BUS;
5845 ap->hsm_task_state = HSM_ST_ERR;
5846 }
312f7da2
AL
5847 }
5848 break;
5849 case HSM_ST:
5850 break;
1da177e4
LT
5851 default:
5852 goto idle_irq;
5853 }
5854
312f7da2
AL
5855 /* check altstatus */
5856 status = ata_altstatus(ap);
5857 if (status & ATA_BUSY)
5858 goto idle_irq;
1da177e4 5859
312f7da2
AL
5860 /* check main status, clearing INTRQ */
5861 status = ata_chk_status(ap);
5862 if (unlikely(status & ATA_BUSY))
5863 goto idle_irq;
1da177e4 5864
312f7da2
AL
5865 /* ack bmdma irq events */
5866 ap->ops->irq_clear(ap);
1da177e4 5867
bb5cb290 5868 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5869
5870 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5871 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5872 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5873
1da177e4
LT
5874 return 1; /* irq handled */
5875
5876idle_irq:
5877 ap->stats.idle_irq++;
5878
5879#ifdef ATA_IRQ_TRAP
5880 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
5881 ata_chk_status(ap);
5882 ap->ops->irq_clear(ap);
f15a1daf 5883 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5884 return 1;
1da177e4
LT
5885 }
5886#endif
5887 return 0; /* irq not handled */
5888}
5889
5890/**
5891 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5892 * @irq: irq line (unused)
cca3974e 5893 * @dev_instance: pointer to our ata_host information structure
1da177e4 5894 *
0cba632b
JG
5895 * Default interrupt handler for PCI IDE devices. Calls
5896 * ata_host_intr() for each port that is not disabled.
5897 *
1da177e4 5898 * LOCKING:
cca3974e 5899 * Obtains host lock during operation.
1da177e4
LT
5900 *
5901 * RETURNS:
0cba632b 5902 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5903 */
5904
7d12e780 5905irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5906{
cca3974e 5907 struct ata_host *host = dev_instance;
1da177e4
LT
5908 unsigned int i;
5909 unsigned int handled = 0;
5910 unsigned long flags;
5911
5912 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5913 spin_lock_irqsave(&host->lock, flags);
1da177e4 5914
cca3974e 5915 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5916 struct ata_port *ap;
5917
cca3974e 5918 ap = host->ports[i];
c1389503 5919 if (ap &&
029f5468 5920 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5921 struct ata_queued_cmd *qc;
5922
9af5c9c9 5923 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 5924 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5925 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5926 handled |= ata_host_intr(ap, qc);
5927 }
5928 }
5929
cca3974e 5930 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5931
5932 return IRQ_RETVAL(handled);
5933}
5934
34bf2170
TH
5935/**
5936 * sata_scr_valid - test whether SCRs are accessible
936fd732 5937 * @link: ATA link to test SCR accessibility for
34bf2170 5938 *
936fd732 5939 * Test whether SCRs are accessible for @link.
34bf2170
TH
5940 *
5941 * LOCKING:
5942 * None.
5943 *
5944 * RETURNS:
5945 * 1 if SCRs are accessible, 0 otherwise.
5946 */
936fd732 5947int sata_scr_valid(struct ata_link *link)
34bf2170 5948{
936fd732
TH
5949 struct ata_port *ap = link->ap;
5950
a16abc0b 5951 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5952}
5953
5954/**
5955 * sata_scr_read - read SCR register of the specified port
936fd732 5956 * @link: ATA link to read SCR for
34bf2170
TH
5957 * @reg: SCR to read
5958 * @val: Place to store read value
5959 *
936fd732 5960 * Read SCR register @reg of @link into *@val. This function is
34bf2170
TH
5961 * guaranteed to succeed if the cable type of the port is SATA
5962 * and the port implements ->scr_read.
5963 *
5964 * LOCKING:
5965 * None.
5966 *
5967 * RETURNS:
5968 * 0 on success, negative errno on failure.
5969 */
936fd732 5970int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5971{
936fd732
TH
5972 struct ata_port *ap = link->ap;
5973
5974 if (sata_scr_valid(link))
da3dbb17 5975 return ap->ops->scr_read(ap, reg, val);
34bf2170
TH
5976 return -EOPNOTSUPP;
5977}
5978
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @link.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_write.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	struct ata_port *ap = link->ap;

	/* NOTE(review): sata_scr_valid() only checks ->scr_read; assumes
	 * drivers providing scr_read also provide scr_write -- confirm */
	if (sata_scr_valid(link))
		return ap->ops->scr_write(ap, reg, val);
	return -EOPNOTSUPP;
}
6003
6004/**
6005 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6006 * @link: ATA link to write SCR for
34bf2170
TH
6007 * @reg: SCR to write
6008 * @val: value to write
6009 *
6010 * This function is identical to sata_scr_write() except that this
6011 * function performs flush after writing to the register.
6012 *
6013 * LOCKING:
6014 * None.
6015 *
6016 * RETURNS:
6017 * 0 on success, negative errno on failure.
6018 */
936fd732 6019int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6020{
936fd732 6021 struct ata_port *ap = link->ap;
da3dbb17
TH
6022 int rc;
6023
936fd732 6024 if (sata_scr_valid(link)) {
da3dbb17
TH
6025 rc = ap->ops->scr_write(ap, reg, val);
6026 if (rc == 0)
6027 rc = ap->ops->scr_read(ap, reg, &val);
6028 return rc;
34bf2170
TH
6029 }
6030 return -EOPNOTSUPP;
6031}
6032
6033/**
936fd732
TH
6034 * ata_link_online - test whether the given link is online
6035 * @link: ATA link to test
34bf2170 6036 *
936fd732
TH
6037 * Test whether @link is online. Note that this function returns
6038 * 0 if online status of @link cannot be obtained, so
6039 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6040 *
6041 * LOCKING:
6042 * None.
6043 *
6044 * RETURNS:
6045 * 1 if the port online status is available and online.
6046 */
936fd732 6047int ata_link_online(struct ata_link *link)
34bf2170
TH
6048{
6049 u32 sstatus;
6050
936fd732
TH
6051 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6052 (sstatus & 0xf) == 0x3)
34bf2170
TH
6053 return 1;
6054 return 0;
6055}
6056
6057/**
936fd732
TH
6058 * ata_link_offline - test whether the given link is offline
6059 * @link: ATA link to test
34bf2170 6060 *
936fd732
TH
6061 * Test whether @link is offline. Note that this function
6062 * returns 0 if offline status of @link cannot be obtained, so
6063 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6064 *
6065 * LOCKING:
6066 * None.
6067 *
6068 * RETURNS:
6069 * 1 if the port offline status is available and offline.
6070 */
936fd732 6071int ata_link_offline(struct ata_link *link)
34bf2170
TH
6072{
6073 u32 sstatus;
6074
936fd732
TH
6075 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6076 (sstatus & 0xf) != 0x3)
34bf2170
TH
6077 return 1;
6078 return 0;
6079}
0baab86b 6080
77b08fb5 6081int ata_flush_cache(struct ata_device *dev)
9b847548 6082{
977e6b9f 6083 unsigned int err_mask;
9b847548
JA
6084 u8 cmd;
6085
6086 if (!ata_try_flush_cache(dev))
6087 return 0;
6088
6fc49adb 6089 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6090 cmd = ATA_CMD_FLUSH_EXT;
6091 else
6092 cmd = ATA_CMD_FLUSH;
6093
4f34337b
AC
6094 /* This is wrong. On a failed flush we get back the LBA of the lost
6095 sector and we should (assuming it wasn't aborted as unknown) issue
6096 a further flush command to continue the writeback until it
6097 does not error */
977e6b9f
TH
6098 err_mask = ata_do_simple_cmd(dev, cmd);
6099 if (err_mask) {
6100 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6101 return -EIO;
6102 }
6103
6104 return 0;
9b847548
JA
6105}
6106
6ffa01d8 6107#ifdef CONFIG_PM
/*
 * ata_host_request_pm - hand a PM operation to EH for every port of @host
 * @host: target host
 * @mesg: PM message to deliver
 * @action: EH actions to schedule on each link
 * @ehi_flags: EH info flags to set on each link
 * @wait: if non-zero, wait for EH to finish each port and return its result
 *
 * Actual suspend/resume work is done by EH; this routine only queues the
 * request per port.  Returns 0 on success, first per-port error otherwise.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH reports its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6157
6158/**
cca3974e
JG
6159 * ata_host_suspend - suspend host
6160 * @host: host to suspend
500530f6
TH
6161 * @mesg: PM message
6162 *
cca3974e 6163 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6164 * function requests EH to perform PM operations and waits for EH
6165 * to finish.
6166 *
6167 * LOCKING:
6168 * Kernel thread context (may sleep).
6169 *
6170 * RETURNS:
6171 * 0 on success, -errno on failure.
6172 */
cca3974e 6173int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6174{
9666f400 6175 int rc;
500530f6 6176
cca3974e 6177 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6178 if (rc == 0)
6179 host->dev->power.power_state = mesg;
500530f6
TH
6180 return rc;
6181}
6182
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* wait == 0: fire-and-forget, ports resume in parallel via EH */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
6ffa01d8 6200#endif
500530f6 6201
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* devres-managed: freed automatically when the device goes away */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
6232
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe only the re-probeable part of the structure; fields
	 * before ATA_DEVICE_CLEAR_OFFSET survive across probes */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* start with transfer masks wide open; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6267
4fb37a25
TH
6268/**
6269 * ata_link_init - Initialize an ata_link structure
6270 * @ap: ATA port link is attached to
6271 * @link: Link structure to initialize
8989805d 6272 * @pmp: Port multiplier port number
4fb37a25
TH
6273 *
6274 * Initialize @link.
6275 *
6276 * LOCKING:
6277 * Kernel thread context (may sleep)
6278 */
fb7fd614 6279void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6280{
6281 int i;
6282
6283 /* clear everything except for devices */
6284 memset(link, 0, offsetof(struct ata_link, device[0]));
6285
6286 link->ap = ap;
8989805d 6287 link->pmp = pmp;
4fb37a25
TH
6288 link->active_tag = ATA_TAG_POISON;
6289 link->hw_sata_spd_limit = UINT_MAX;
6290
6291 /* can't use iterator, ap isn't initialized yet */
6292 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6293 struct ata_device *dev = &link->device[i];
6294
6295 dev->link = link;
6296 dev->devno = dev - link->device;
6297 ata_dev_init(dev);
6298 }
6299}
6300
6301/**
6302 * sata_link_init_spd - Initialize link->sata_spd_limit
6303 * @link: Link to configure sata_spd_limit for
6304 *
6305 * Initialize @link->[hw_]sata_spd_limit to the currently
6306 * configured value.
6307 *
6308 * LOCKING:
6309 * Kernel thread context (may sleep).
6310 *
6311 * RETURNS:
6312 * 0 on success, -errno on failure.
6313 */
fb7fd614 6314int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6315{
6316 u32 scontrol, spd;
6317 int rc;
6318
6319 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6320 if (rc)
6321 return rc;
6322
6323 spd = (scontrol >> 4) & 0xf;
6324 if (spd)
6325 link->hw_sata_spd_limit &= (1 << spd) - 1;
6326
6327 link->sata_spd_limit = link->hw_sata_spd_limit;
6328
6329 return 0;
6330}
6331
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* port stays INITIALIZING/DISABLED until registration probes it */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;	/* real id assigned at registration */
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	/* work items and EH synchronization primitives */
	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* host-side link is PMP 0 */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6391
/*
 * ata_host_release - devres release callback freeing an ata_host
 * @gendev: generic device the host is attached to
 * @res: devres-allocated host (unused; host is fetched from drvdata)
 *
 * Teardown is two-pass: stop all ports, then stop the host, and only
 * afterwards drop SCSI host references and free the port structures,
 * since ->host_stop may still touch ports.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		/* only stop what was actually started */
		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6425
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 keeps a NULL sentinel after the last port pointer */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6490
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	/* NOTE(review): assumes ppi[0] is non-NULL; a NULL first entry
	 * would leave pi NULL and oops below -- verify against callers */
	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* reuse the last non-NULL entry for remaining ports */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port ops also serve as host ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
6540
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	/* idempotent: second call is a no-op */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		/* keep port frozen until EH runs the initial probe */
		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop only the ports already started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
6594
/**
 *	ata_sas_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	/* minimal init for SAS-attached hosts; no port allocation here */
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
6615
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			/* probe every possible device on the link */
			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* legacy path for drivers without new-style EH */
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
	}

	return 0;
}
6743
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessasry
 *	arguments and performs the three steps in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* devres-managed IRQ; freed with the device on teardown */
	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	/* record the IRQ in each port's description for dmesg */
	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6788
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* legacy (no-EH) drivers only need the SCSI host removed */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6843
0529c159
TH
6844/**
6845 * ata_host_detach - Detach all ports of an ATA host
6846 * @host: Host to detach
6847 *
6848 * Detach all ports of @host.
6849 *
6850 * LOCKING:
6851 * Kernel thread context (may sleep).
6852 */
6853void ata_host_detach(struct ata_host *host)
6854{
6855 int i;
6856
6857 for (i = 0; i < host->n_ports; i++)
6858 ata_port_detach(host->ports[i]);
6859}
6860
1da177e4
LT
6861/**
6862 * ata_std_ports - initialize ioaddr with standard port offsets.
6863 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6864 *
6865 * Utility function which initializes data_addr, error_addr,
6866 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6867 * device_addr, status_addr, and command_addr to standard offsets
6868 * relative to cmd_addr.
6869 *
6870 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6871 */
0baab86b 6872
1da177e4
LT
6873void ata_std_ports(struct ata_ioports *ioaddr)
6874{
6875 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6876 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6877 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6878 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6879 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6880 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6881 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6882 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6883 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6884 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6885}
6886
0baab86b 6887
374b1873
JG
6888#ifdef CONFIG_PCI
6889
1da177e4
LT
6890/**
6891 * ata_pci_remove_one - PCI layer callback for device removal
6892 * @pdev: PCI device that was removed
6893 *
b878ca5d
TH
6894 * PCI layer indicates to libata via this hook that hot-unplug or
6895 * module unload event has occurred. Detach all ports. Resource
6896 * release is handled via devres.
1da177e4
LT
6897 *
6898 * LOCKING:
6899 * Inherited from PCI layer (may sleep).
6900 */
f0d36efd 6901void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
6902{
6903 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6904 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6905
b878ca5d 6906 ata_host_detach(host);
1da177e4
LT
6907}
6908
6909/* move to PCI subsystem */
057ace5e 6910int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6911{
6912 unsigned long tmp = 0;
6913
6914 switch (bits->width) {
6915 case 1: {
6916 u8 tmp8 = 0;
6917 pci_read_config_byte(pdev, bits->reg, &tmp8);
6918 tmp = tmp8;
6919 break;
6920 }
6921 case 2: {
6922 u16 tmp16 = 0;
6923 pci_read_config_word(pdev, bits->reg, &tmp16);
6924 tmp = tmp16;
6925 break;
6926 }
6927 case 4: {
6928 u32 tmp32 = 0;
6929 pci_read_config_dword(pdev, bits->reg, &tmp32);
6930 tmp = tmp32;
6931 break;
6932 }
6933
6934 default:
6935 return -EINVAL;
6936 }
6937
6938 tmp &= bits->mask;
6939
6940 return (tmp == bits->val) ? 1 : 0;
6941}
9b847548 6942
6ffa01d8 6943#ifdef CONFIG_PM
/*
 * ata_pci_device_do_suspend - PCI-side half of libata PCI suspend
 * @pdev: PCI device to suspend
 * @mesg: PM message (power state only dropped for real suspends)
 *
 * Saves config space and disables the device; enters D3hot only for
 * PM_EVENT_SUSPEND (not e.g. freeze/hibernate prepare).
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
6952
/*
 * ata_pci_device_do_resume - PCI-side half of libata PCI resume
 * @pdev: PCI device to resume
 *
 * Restores power state and config space before re-enabling the
 * device; order matters.  Returns 0 on success, -errno if the device
 * could not be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* devres-managed enable, paired with devres teardown */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6970
3c5100c1 6971int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6972{
cca3974e 6973 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6974 int rc = 0;
6975
cca3974e 6976 rc = ata_host_suspend(host, mesg);
500530f6
TH
6977 if (rc)
6978 return rc;
6979
3c5100c1 6980 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6981
6982 return 0;
6983}
6984
6985int ata_pci_device_resume(struct pci_dev *pdev)
6986{
cca3974e 6987 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6988 int rc;
500530f6 6989
553c4aa6
TH
6990 rc = ata_pci_device_do_resume(pdev);
6991 if (rc == 0)
6992 ata_host_resume(host);
6993 return rc;
9b847548 6994}
6ffa01d8
TH
6995#endif /* CONFIG_PM */
6996
1da177e4
LT
6997#endif /* CONFIG_PCI */
6998
6999
1da177e4
LT
7000static int __init ata_init(void)
7001{
a8601e5f 7002 ata_probe_timeout *= HZ;
1da177e4
LT
7003 ata_wq = create_workqueue("ata");
7004 if (!ata_wq)
7005 return -ENOMEM;
7006
453b07ac
TH
7007 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7008 if (!ata_aux_wq) {
7009 destroy_workqueue(ata_wq);
7010 return -ENOMEM;
7011 }
7012
1da177e4
LT
7013 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7014 return 0;
7015}
7016
7017static void __exit ata_exit(void)
7018{
7019 destroy_workqueue(ata_wq);
453b07ac 7020 destroy_workqueue(ata_aux_wq);
1da177e4
LT
7021}
7022
/*
 * Register at subsys initcall time so libata is initialized before
 * the controller drivers (device-level initcalls) start probing.
 */
subsys_initcall(ata_init);
module_exit(ata_exit);
7025
67846b30 7026static unsigned long ratelimit_time;
34af946a 7027static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7028
7029int ata_ratelimit(void)
7030{
7031 int rc;
7032 unsigned long flags;
7033
7034 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7035
7036 if (time_after(jiffies, ratelimit_time)) {
7037 rc = 1;
7038 ratelimit_time = jiffies + (HZ/5);
7039 } else
7040 rc = 0;
7041
7042 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7043
7044 return rc;
7045}
7046
c22daff4
TH
7047/**
7048 * ata_wait_register - wait until register value changes
7049 * @reg: IO-mapped register
7050 * @mask: Mask to apply to read register value
7051 * @val: Wait condition
7052 * @interval_msec: polling interval in milliseconds
7053 * @timeout_msec: timeout in milliseconds
7054 *
7055 * Waiting for some bits of register to change is a common
7056 * operation for ATA controllers. This function reads 32bit LE
7057 * IO-mapped register @reg and tests for the following condition.
7058 *
7059 * (*@reg & mask) != val
7060 *
7061 * If the condition is met, it returns; otherwise, the process is
7062 * repeated after @interval_msec until timeout.
7063 *
7064 * LOCKING:
7065 * Kernel thread context (may sleep)
7066 *
7067 * RETURNS:
7068 * The final register value.
7069 */
7070u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7071 unsigned long interval_msec,
7072 unsigned long timeout_msec)
7073{
7074 unsigned long timeout;
7075 u32 tmp;
7076
7077 tmp = ioread32(reg);
7078
7079 /* Calculate timeout _after_ the first read to make sure
7080 * preceding writes reach the controller before starting to
7081 * eat away the timeout.
7082 */
7083 timeout = jiffies + (timeout_msec * HZ) / 1000;
7084
7085 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7086 msleep(interval_msec);
7087 tmp = ioread32(reg);
7088 }
7089
7090 return tmp;
7091}
7092
/*
 * Dummy port_ops
 *
 * Minimal operations for ports that must exist but carry no
 * traffic: no-op/zero-returning hooks, a status that always reads
 * device-ready, and a qc_issue that rejects every command.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* Always report the (nonexistent) device as ready. */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* Fail every queued command with a system error. */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7109
/* Operations table wiring the dummy/no-op hooks above; used for
 * ports that exist only as placeholders.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};
7124
/* port_info referencing the dummy ops, for drivers that need a
 * placeholder port description.
 */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7128
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* SATA debounce timing tables and dummy port plumbing */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);

/* host allocation / registration lifecycle */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);

/* command submission / completion */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);

/* taskfile and register access helpers */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);

/* BMDMA helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/* probing, link handling and resets */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);

/* SCSI layer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);

/* SCR access and link state */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */

/* IDENTIFY data and timing helpers */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

/* PCI helpers */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

/* error handling (EH) entry points */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

/* cable detection helpers */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);