]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/ata/libata-core.c
libata: add qc_fill_rtf port operation
[mirror_ubuntu-eoan-kernel.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
1da177e4
LT
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
67846b30 57#include <linux/jiffies.h>
378f058c 58#include <linux/scatterlist.h>
2dcb407e 59#include <linux/io.h>
1da177e4 60#include <scsi/scsi.h>
193515d5 61#include <scsi/scsi_cmnd.h>
1da177e4
LT
62#include <scsi/scsi_host.h>
63#include <linux/libata.h>
1da177e4
LT
64#include <asm/semaphore.h>
65#include <asm/byteorder.h>
140b5e59 66#include <linux/cdrom.h>
1da177e4
LT
67
68#include "libata.h"
69
fda0efc5 70
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]  = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };
d7bb4cc7 75
029cfd6b 76const struct ata_port_operations ata_base_port_ops = {
22183bf5 77 .qc_fill_rtf = ata_sff_qc_fill_rtf,
0aa1113d 78 .prereset = ata_std_prereset,
203c75b8 79 .postreset = ata_std_postreset,
a1efdaba 80 .error_handler = ata_std_error_handler,
029cfd6b
TH
81};
82
83const struct ata_port_operations sata_port_ops = {
84 .inherits = &ata_base_port_ops,
85
86 .qc_defer = ata_std_qc_defer,
57c9efdf 87 .hardreset = sata_std_hardreset,
5682ed33 88 .sff_dev_select = ata_noop_dev_select,
029cfd6b
TH
89};
90
91const struct ata_port_operations sata_pmp_port_ops = {
92 .inherits = &sata_port_ops,
a1efdaba 93
ac371987 94 .pmp_prereset = ata_std_prereset,
5958e302 95 .pmp_hardreset = sata_std_hardreset,
ac371987 96 .pmp_postreset = ata_std_postreset,
a1efdaba 97 .error_handler = sata_pmp_error_handler,
029cfd6b
TH
98};
99
3373efd8
TH
100static unsigned int ata_dev_init_params(struct ata_device *dev,
101 u16 heads, u16 sectors);
102static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
103static unsigned int ata_dev_set_feature(struct ata_device *dev,
104 u8 enable, u8 feature);
3373efd8 105static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 106static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 107
f3187195 108unsigned int ata_print_id = 1;
1da177e4
LT
109static struct workqueue_struct *ata_wq;
110
453b07ac
TH
111struct workqueue_struct *ata_aux_wq;
112
33267325
TH
113struct ata_force_param {
114 const char *name;
115 unsigned int cbl;
116 int spd_limit;
117 unsigned long xfer_mask;
118 unsigned int horkage_on;
119 unsigned int horkage_off;
120};
121
122struct ata_force_ent {
123 int port;
124 int device;
125 struct ata_force_param param;
126};
127
128static struct ata_force_ent *ata_force_tbl;
129static int ata_force_tbl_size;
130
131static char ata_force_param_buf[PAGE_SIZE] __initdata;
7afb4222
TH
132/* param_buf is thrown away after initialization, disallow read */
133module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
33267325
TH
134MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
135
418dc1f5 136int atapi_enabled = 1;
1623c81e
JG
137module_param(atapi_enabled, int, 0444);
138MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
139
c5c61bda 140static int atapi_dmadir = 0;
95de719a
AL
141module_param(atapi_dmadir, int, 0444);
142MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
143
baf4fdfa
ML
144int atapi_passthru16 = 1;
145module_param(atapi_passthru16, int, 0444);
146MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
147
c3c013a2
JG
148int libata_fua = 0;
149module_param_named(fua, libata_fua, int, 0444);
150MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
151
2dcb407e 152static int ata_ignore_hpa;
1e999736
AC
153module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
154MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
155
b3a70601
AC
156static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
157module_param_named(dma, libata_dma_mask, int, 0444);
158MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
159
a8601e5f
AM
160static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
161module_param(ata_probe_timeout, int, 0444);
162MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
163
6ebe9d86 164int libata_noacpi = 0;
d7d0dad6 165module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 166MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 167
ae8d4ee7
AC
168int libata_allow_tpm = 0;
169module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
170MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
171
1da177e4
LT
172MODULE_AUTHOR("Jeff Garzik");
173MODULE_DESCRIPTION("Library module for ATA devices");
174MODULE_LICENSE("GPL");
175MODULE_VERSION(DRV_VERSION);
176
0baab86b 177
33267325
TH
178/**
179 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 180 * @ap: ATA port of interest
33267325
TH
181 *
182 * Force cable type according to libata.force and whine about it.
183 * The last entry which has matching port number is used, so it
184 * can be specified as part of device force parameters. For
185 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
186 * same effect.
187 *
188 * LOCKING:
189 * EH context.
190 */
191void ata_force_cbl(struct ata_port *ap)
192{
193 int i;
194
195 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
196 const struct ata_force_ent *fe = &ata_force_tbl[i];
197
198 if (fe->port != -1 && fe->port != ap->print_id)
199 continue;
200
201 if (fe->param.cbl == ATA_CBL_NONE)
202 continue;
203
204 ap->cbl = fe->param.cbl;
205 ata_port_printk(ap, KERN_NOTICE,
206 "FORCE: cable set to %s\n", fe->param.name);
207 return;
208 }
209}
210
211/**
212 * ata_force_spd_limit - force SATA spd limit according to libata.force
213 * @link: ATA link of interest
214 *
215 * Force SATA spd limit according to libata.force and whine about
216 * it. When only the port part is specified (e.g. 1:), the limit
217 * applies to all links connected to both the host link and all
218 * fan-out ports connected via PMP. If the device part is
219 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
220 * link not the host link. Device number 15 always points to the
221 * host link whether PMP is attached or not.
222 *
223 * LOCKING:
224 * EH context.
225 */
226static void ata_force_spd_limit(struct ata_link *link)
227{
228 int linkno, i;
229
230 if (ata_is_host_link(link))
231 linkno = 15;
232 else
233 linkno = link->pmp;
234
235 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
236 const struct ata_force_ent *fe = &ata_force_tbl[i];
237
238 if (fe->port != -1 && fe->port != link->ap->print_id)
239 continue;
240
241 if (fe->device != -1 && fe->device != linkno)
242 continue;
243
244 if (!fe->param.spd_limit)
245 continue;
246
247 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
248 ata_link_printk(link, KERN_NOTICE,
249 "FORCE: PHY spd limit set to %s\n", fe->param.name);
250 return;
251 }
252}
253
254/**
255 * ata_force_xfermask - force xfermask according to libata.force
256 * @dev: ATA device of interest
257 *
258 * Force xfer_mask according to libata.force and whine about it.
259 * For consistency with link selection, device number 15 selects
260 * the first device connected to the host link.
261 *
262 * LOCKING:
263 * EH context.
264 */
265static void ata_force_xfermask(struct ata_device *dev)
266{
267 int devno = dev->link->pmp + dev->devno;
268 int alt_devno = devno;
269 int i;
270
271 /* allow n.15 for the first device attached to host port */
272 if (ata_is_host_link(dev->link) && devno == 0)
273 alt_devno = 15;
274
275 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
276 const struct ata_force_ent *fe = &ata_force_tbl[i];
277 unsigned long pio_mask, mwdma_mask, udma_mask;
278
279 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
280 continue;
281
282 if (fe->device != -1 && fe->device != devno &&
283 fe->device != alt_devno)
284 continue;
285
286 if (!fe->param.xfer_mask)
287 continue;
288
289 ata_unpack_xfermask(fe->param.xfer_mask,
290 &pio_mask, &mwdma_mask, &udma_mask);
291 if (udma_mask)
292 dev->udma_mask = udma_mask;
293 else if (mwdma_mask) {
294 dev->udma_mask = 0;
295 dev->mwdma_mask = mwdma_mask;
296 } else {
297 dev->udma_mask = 0;
298 dev->mwdma_mask = 0;
299 dev->pio_mask = pio_mask;
300 }
301
302 ata_dev_printk(dev, KERN_NOTICE,
303 "FORCE: xfer_mask set to %s\n", fe->param.name);
304 return;
305 }
306}
307
308/**
309 * ata_force_horkage - force horkage according to libata.force
310 * @dev: ATA device of interest
311 *
312 * Force horkage according to libata.force and whine about it.
313 * For consistency with link selection, device number 15 selects
314 * the first device connected to the host link.
315 *
316 * LOCKING:
317 * EH context.
318 */
319static void ata_force_horkage(struct ata_device *dev)
320{
321 int devno = dev->link->pmp + dev->devno;
322 int alt_devno = devno;
323 int i;
324
325 /* allow n.15 for the first device attached to host port */
326 if (ata_is_host_link(dev->link) && devno == 0)
327 alt_devno = 15;
328
329 for (i = 0; i < ata_force_tbl_size; i++) {
330 const struct ata_force_ent *fe = &ata_force_tbl[i];
331
332 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
333 continue;
334
335 if (fe->device != -1 && fe->device != devno &&
336 fe->device != alt_devno)
337 continue;
338
339 if (!(~dev->horkage & fe->param.horkage_on) &&
340 !(dev->horkage & fe->param.horkage_off))
341 continue;
342
343 dev->horkage |= fe->param.horkage_on;
344 dev->horkage &= ~fe->param.horkage_off;
345
346 ata_dev_printk(dev, KERN_NOTICE,
347 "FORCE: horkage modified (%s)\n", fe->param.name);
348 }
349}
350
436d34b3
TH
351/**
352 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
353 * @opcode: SCSI opcode
354 *
355 * Determine ATAPI command type from @opcode.
356 *
357 * LOCKING:
358 * None.
359 *
360 * RETURNS:
361 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
362 */
363int atapi_cmd_type(u8 opcode)
364{
365 switch (opcode) {
366 case GPCMD_READ_10:
367 case GPCMD_READ_12:
368 return ATAPI_READ;
369
370 case GPCMD_WRITE_10:
371 case GPCMD_WRITE_12:
372 case GPCMD_WRITE_AND_VERIFY_10:
373 return ATAPI_WRITE;
374
375 case GPCMD_READ_CD:
376 case GPCMD_READ_CD_MSF:
377 return ATAPI_READ_CD;
378
e52dcc48
TH
379 case ATA_16:
380 case ATA_12:
381 if (atapi_passthru16)
382 return ATAPI_PASS_THRU;
383 /* fall thru */
436d34b3
TH
384 default:
385 return ATAPI_MISC;
386 }
387}
388
1da177e4
LT
389/**
390 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
391 * @tf: Taskfile to convert
1da177e4 392 * @pmp: Port multiplier port
9977126c
TH
393 * @is_cmd: This FIS is for command
394 * @fis: Buffer into which data will output
1da177e4
LT
395 *
396 * Converts a standard ATA taskfile to a Serial ATA
397 * FIS structure (Register - Host to Device).
398 *
399 * LOCKING:
400 * Inherited from caller.
401 */
9977126c 402void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 403{
9977126c
TH
404 fis[0] = 0x27; /* Register - Host to Device FIS */
405 fis[1] = pmp & 0xf; /* Port multiplier number*/
406 if (is_cmd)
407 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
408
1da177e4
LT
409 fis[2] = tf->command;
410 fis[3] = tf->feature;
411
412 fis[4] = tf->lbal;
413 fis[5] = tf->lbam;
414 fis[6] = tf->lbah;
415 fis[7] = tf->device;
416
417 fis[8] = tf->hob_lbal;
418 fis[9] = tf->hob_lbam;
419 fis[10] = tf->hob_lbah;
420 fis[11] = tf->hob_feature;
421
422 fis[12] = tf->nsect;
423 fis[13] = tf->hob_nsect;
424 fis[14] = 0;
425 fis[15] = tf->ctl;
426
427 fis[16] = 0;
428 fis[17] = 0;
429 fis[18] = 0;
430 fis[19] = 0;
431}
432
433/**
434 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
435 * @fis: Buffer from which data will be input
436 * @tf: Taskfile to output
437 *
e12a1be6 438 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
439 *
440 * LOCKING:
441 * Inherited from caller.
442 */
443
057ace5e 444void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
445{
446 tf->command = fis[2]; /* status */
447 tf->feature = fis[3]; /* error */
448
449 tf->lbal = fis[4];
450 tf->lbam = fis[5];
451 tf->lbah = fis[6];
452 tf->device = fis[7];
453
454 tf->hob_lbal = fis[8];
455 tf->hob_lbam = fis[9];
456 tf->hob_lbah = fis[10];
457
458 tf->nsect = fis[12];
459 tf->hob_nsect = fis[13];
460}
461
8cbd6df1
AL
462static const u8 ata_rw_cmds[] = {
463 /* pio multi */
464 ATA_CMD_READ_MULTI,
465 ATA_CMD_WRITE_MULTI,
466 ATA_CMD_READ_MULTI_EXT,
467 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
468 0,
469 0,
470 0,
471 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
472 /* pio */
473 ATA_CMD_PIO_READ,
474 ATA_CMD_PIO_WRITE,
475 ATA_CMD_PIO_READ_EXT,
476 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
477 0,
478 0,
479 0,
480 0,
8cbd6df1
AL
481 /* dma */
482 ATA_CMD_READ,
483 ATA_CMD_WRITE,
484 ATA_CMD_READ_EXT,
9a3dccc4
TH
485 ATA_CMD_WRITE_EXT,
486 0,
487 0,
488 0,
489 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 490};
1da177e4
LT
491
492/**
8cbd6df1 493 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
494 * @tf: command to examine and configure
495 * @dev: device tf belongs to
1da177e4 496 *
2e9edbf8 497 * Examine the device configuration and tf->flags to calculate
8cbd6df1 498 * the proper read/write commands and protocol to use.
1da177e4
LT
499 *
500 * LOCKING:
501 * caller.
502 */
bd056d7e 503static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 504{
9a3dccc4 505 u8 cmd;
1da177e4 506
9a3dccc4 507 int index, fua, lba48, write;
2e9edbf8 508
9a3dccc4 509 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
510 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
511 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 512
8cbd6df1
AL
513 if (dev->flags & ATA_DFLAG_PIO) {
514 tf->protocol = ATA_PROT_PIO;
9a3dccc4 515 index = dev->multi_count ? 0 : 8;
9af5c9c9 516 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
517 /* Unable to use DMA due to host limitation */
518 tf->protocol = ATA_PROT_PIO;
0565c26d 519 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
520 } else {
521 tf->protocol = ATA_PROT_DMA;
9a3dccc4 522 index = 16;
8cbd6df1 523 }
1da177e4 524
9a3dccc4
TH
525 cmd = ata_rw_cmds[index + fua + lba48 + write];
526 if (cmd) {
527 tf->command = cmd;
528 return 0;
529 }
530 return -1;
1da177e4
LT
531}
532
35b649fe
TH
533/**
534 * ata_tf_read_block - Read block address from ATA taskfile
535 * @tf: ATA taskfile of interest
536 * @dev: ATA device @tf belongs to
537 *
538 * LOCKING:
539 * None.
540 *
541 * Read block address from @tf. This function can handle all
542 * three address formats - LBA, LBA48 and CHS. tf->protocol and
543 * flags select the address format to use.
544 *
545 * RETURNS:
546 * Block address read from @tf.
547 */
548u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
549{
550 u64 block = 0;
551
552 if (tf->flags & ATA_TFLAG_LBA) {
553 if (tf->flags & ATA_TFLAG_LBA48) {
554 block |= (u64)tf->hob_lbah << 40;
555 block |= (u64)tf->hob_lbam << 32;
556 block |= tf->hob_lbal << 24;
557 } else
558 block |= (tf->device & 0xf) << 24;
559
560 block |= tf->lbah << 16;
561 block |= tf->lbam << 8;
562 block |= tf->lbal;
563 } else {
564 u32 cyl, head, sect;
565
566 cyl = tf->lbam | (tf->lbah << 8);
567 head = tf->device & 0xf;
568 sect = tf->lbal;
569
570 block = (cyl * dev->heads + head) * dev->sectors + sect;
571 }
572
573 return block;
574}
575
bd056d7e
TH
576/**
577 * ata_build_rw_tf - Build ATA taskfile for given read/write request
578 * @tf: Target ATA taskfile
579 * @dev: ATA device @tf belongs to
580 * @block: Block address
581 * @n_block: Number of blocks
582 * @tf_flags: RW/FUA etc...
583 * @tag: tag
584 *
585 * LOCKING:
586 * None.
587 *
588 * Build ATA taskfile @tf for read/write request described by
589 * @block, @n_block, @tf_flags and @tag on @dev.
590 *
591 * RETURNS:
592 *
593 * 0 on success, -ERANGE if the request is too large for @dev,
594 * -EINVAL if the request is invalid.
595 */
596int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
597 u64 block, u32 n_block, unsigned int tf_flags,
598 unsigned int tag)
599{
600 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
601 tf->flags |= tf_flags;
602
6d1245bf 603 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
604 /* yay, NCQ */
605 if (!lba_48_ok(block, n_block))
606 return -ERANGE;
607
608 tf->protocol = ATA_PROT_NCQ;
609 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
610
611 if (tf->flags & ATA_TFLAG_WRITE)
612 tf->command = ATA_CMD_FPDMA_WRITE;
613 else
614 tf->command = ATA_CMD_FPDMA_READ;
615
616 tf->nsect = tag << 3;
617 tf->hob_feature = (n_block >> 8) & 0xff;
618 tf->feature = n_block & 0xff;
619
620 tf->hob_lbah = (block >> 40) & 0xff;
621 tf->hob_lbam = (block >> 32) & 0xff;
622 tf->hob_lbal = (block >> 24) & 0xff;
623 tf->lbah = (block >> 16) & 0xff;
624 tf->lbam = (block >> 8) & 0xff;
625 tf->lbal = block & 0xff;
626
627 tf->device = 1 << 6;
628 if (tf->flags & ATA_TFLAG_FUA)
629 tf->device |= 1 << 7;
630 } else if (dev->flags & ATA_DFLAG_LBA) {
631 tf->flags |= ATA_TFLAG_LBA;
632
633 if (lba_28_ok(block, n_block)) {
634 /* use LBA28 */
635 tf->device |= (block >> 24) & 0xf;
636 } else if (lba_48_ok(block, n_block)) {
637 if (!(dev->flags & ATA_DFLAG_LBA48))
638 return -ERANGE;
639
640 /* use LBA48 */
641 tf->flags |= ATA_TFLAG_LBA48;
642
643 tf->hob_nsect = (n_block >> 8) & 0xff;
644
645 tf->hob_lbah = (block >> 40) & 0xff;
646 tf->hob_lbam = (block >> 32) & 0xff;
647 tf->hob_lbal = (block >> 24) & 0xff;
648 } else
649 /* request too large even for LBA48 */
650 return -ERANGE;
651
652 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
653 return -EINVAL;
654
655 tf->nsect = n_block & 0xff;
656
657 tf->lbah = (block >> 16) & 0xff;
658 tf->lbam = (block >> 8) & 0xff;
659 tf->lbal = block & 0xff;
660
661 tf->device |= ATA_LBA;
662 } else {
663 /* CHS */
664 u32 sect, head, cyl, track;
665
666 /* The request -may- be too large for CHS addressing. */
667 if (!lba_28_ok(block, n_block))
668 return -ERANGE;
669
670 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
671 return -EINVAL;
672
673 /* Convert LBA to CHS */
674 track = (u32)block / dev->sectors;
675 cyl = track / dev->heads;
676 head = track % dev->heads;
677 sect = (u32)block % dev->sectors + 1;
678
679 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
680 (u32)block, track, cyl, head, sect);
681
682 /* Check whether the converted CHS can fit.
683 Cylinder: 0-65535
684 Head: 0-15
685 Sector: 1-255*/
686 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
687 return -ERANGE;
688
689 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
690 tf->lbal = sect;
691 tf->lbam = cyl;
692 tf->lbah = cyl >> 8;
693 tf->device |= head;
694 }
695
696 return 0;
697}
698
cb95d562
TH
699/**
700 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
701 * @pio_mask: pio_mask
702 * @mwdma_mask: mwdma_mask
703 * @udma_mask: udma_mask
704 *
705 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
706 * unsigned int xfer_mask.
707 *
708 * LOCKING:
709 * None.
710 *
711 * RETURNS:
712 * Packed xfer_mask.
713 */
7dc951ae
TH
714unsigned long ata_pack_xfermask(unsigned long pio_mask,
715 unsigned long mwdma_mask,
716 unsigned long udma_mask)
cb95d562
TH
717{
718 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
719 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
720 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
721}
722
c0489e4e
TH
723/**
724 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
725 * @xfer_mask: xfer_mask to unpack
726 * @pio_mask: resulting pio_mask
727 * @mwdma_mask: resulting mwdma_mask
728 * @udma_mask: resulting udma_mask
729 *
730 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
731 * Any NULL distination masks will be ignored.
732 */
7dc951ae
TH
733void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
734 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
735{
736 if (pio_mask)
737 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
738 if (mwdma_mask)
739 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
740 if (udma_mask)
741 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
742}
743
cb95d562 744static const struct ata_xfer_ent {
be9a50c8 745 int shift, bits;
cb95d562
TH
746 u8 base;
747} ata_xfer_tbl[] = {
70cd071e
TH
748 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
749 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
750 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
cb95d562
TH
751 { -1, },
752};
753
754/**
755 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
756 * @xfer_mask: xfer_mask of interest
757 *
758 * Return matching XFER_* value for @xfer_mask. Only the highest
759 * bit of @xfer_mask is considered.
760 *
761 * LOCKING:
762 * None.
763 *
764 * RETURNS:
70cd071e 765 * Matching XFER_* value, 0xff if no match found.
cb95d562 766 */
7dc951ae 767u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
768{
769 int highbit = fls(xfer_mask) - 1;
770 const struct ata_xfer_ent *ent;
771
772 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
773 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
774 return ent->base + highbit - ent->shift;
70cd071e 775 return 0xff;
cb95d562
TH
776}
777
778/**
779 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
780 * @xfer_mode: XFER_* of interest
781 *
782 * Return matching xfer_mask for @xfer_mode.
783 *
784 * LOCKING:
785 * None.
786 *
787 * RETURNS:
788 * Matching xfer_mask, 0 if no match found.
789 */
7dc951ae 790unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
791{
792 const struct ata_xfer_ent *ent;
793
794 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
795 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
796 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
797 & ~((1 << ent->shift) - 1);
cb95d562
TH
798 return 0;
799}
800
801/**
802 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
803 * @xfer_mode: XFER_* of interest
804 *
805 * Return matching xfer_shift for @xfer_mode.
806 *
807 * LOCKING:
808 * None.
809 *
810 * RETURNS:
811 * Matching xfer_shift, -1 if no match found.
812 */
7dc951ae 813int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
814{
815 const struct ata_xfer_ent *ent;
816
817 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
818 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
819 return ent->shift;
820 return -1;
821}
822
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* indexed by bit position in xfer_mask: PIO, MWDMA, then UDMA */
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
868
4c360c81
TH
/* human-readable name of a SATA link speed; spd is 1-based */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
880
3373efd8 881void ata_dev_disable(struct ata_device *dev)
0b8efb0a 882{
09d7f9b0 883 if (ata_dev_enabled(dev)) {
9af5c9c9 884 if (ata_msg_drv(dev->link->ap))
09d7f9b0 885 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
562f0c2d 886 ata_acpi_on_disable(dev);
4ae72a1e
TH
887 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
888 ATA_DNXFER_QUIET);
0b8efb0a
TH
889 dev->class++;
890 }
891}
892
ca77329f
KCA
893static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
894{
895 struct ata_link *link = dev->link;
896 struct ata_port *ap = link->ap;
897 u32 scontrol;
898 unsigned int err_mask;
899 int rc;
900
901 /*
902 * disallow DIPM for drivers which haven't set
903 * ATA_FLAG_IPM. This is because when DIPM is enabled,
904 * phy ready will be set in the interrupt status on
905 * state changes, which will cause some drivers to
906 * think there are errors - additionally drivers will
907 * need to disable hot plug.
908 */
909 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
910 ap->pm_policy = NOT_AVAILABLE;
911 return -EINVAL;
912 }
913
914 /*
915 * For DIPM, we will only enable it for the
916 * min_power setting.
917 *
918 * Why? Because Disks are too stupid to know that
919 * If the host rejects a request to go to SLUMBER
920 * they should retry at PARTIAL, and instead it
921 * just would give up. So, for medium_power to
922 * work at all, we need to only allow HIPM.
923 */
924 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
925 if (rc)
926 return rc;
927
928 switch (policy) {
929 case MIN_POWER:
930 /* no restrictions on IPM transitions */
931 scontrol &= ~(0x3 << 8);
932 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
933 if (rc)
934 return rc;
935
936 /* enable DIPM */
937 if (dev->flags & ATA_DFLAG_DIPM)
938 err_mask = ata_dev_set_feature(dev,
939 SETFEATURES_SATA_ENABLE, SATA_DIPM);
940 break;
941 case MEDIUM_POWER:
942 /* allow IPM to PARTIAL */
943 scontrol &= ~(0x1 << 8);
944 scontrol |= (0x2 << 8);
945 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
946 if (rc)
947 return rc;
948
f5456b63
KCA
949 /*
950 * we don't have to disable DIPM since IPM flags
951 * disallow transitions to SLUMBER, which effectively
952 * disable DIPM if it does not support PARTIAL
953 */
ca77329f
KCA
954 break;
955 case NOT_AVAILABLE:
956 case MAX_PERFORMANCE:
957 /* disable all IPM transitions */
958 scontrol |= (0x3 << 8);
959 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
960 if (rc)
961 return rc;
962
f5456b63
KCA
963 /*
964 * we don't have to disable DIPM since IPM flags
965 * disallow all transitions which effectively
966 * disable DIPM anyway.
967 */
ca77329f
KCA
968 break;
969 }
970
971 /* FIXME: handle SET FEATURES failure */
972 (void) err_mask;
973
974 return 0;
975}
976
977/**
978 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
979 * @dev: device to enable power management
980 * @policy: the link power management policy
ca77329f
KCA
981 *
982 * Enable SATA Interface power management. This will enable
983 * Device Interface Power Management (DIPM) for min_power
984 * policy, and then call driver specific callbacks for
985 * enabling Host Initiated Power management.
986 *
987 * Locking: Caller.
988 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
989 */
990void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
991{
992 int rc = 0;
993 struct ata_port *ap = dev->link->ap;
994
995 /* set HIPM first, then DIPM */
996 if (ap->ops->enable_pm)
997 rc = ap->ops->enable_pm(ap, policy);
998 if (rc)
999 goto enable_pm_out;
1000 rc = ata_dev_set_dipm(dev, policy);
1001
1002enable_pm_out:
1003 if (rc)
1004 ap->pm_policy = MAX_PERFORMANCE;
1005 else
1006 ap->pm_policy = policy;
1007 return /* rc */; /* hopefully we can use 'rc' eventually */
1008}
1009
#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, then call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	/* DIPM first, then the driver's HIPM hook */
	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
ca77329f
KCA
1032
1033void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1034{
1035 ap->pm_policy = policy;
3ec25ebd 1036 ap->link.eh_info.action |= ATA_EH_LPM;
ca77329f
KCA
1037 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1038 ata_port_schedule_eh(ap);
1039}
1040
#ifdef CONFIG_PM
/*
 * Switch off device interface power management on every device of
 * every port of @host.  NOTE(review): despite the name this calls
 * ata_dev_disable_pm(); presumably used to quiesce link PM around
 * suspend -- confirm against the callers.
 */
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

/* reschedule each port's recorded pm_policy through EH */
static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */
ca77329f 1068
1da177e4
LT
1069/**
1070 * ata_dev_classify - determine device type based on ATA-spec signature
1071 * @tf: ATA taskfile register set for device to be identified
1072 *
1073 * Determine from taskfile register contents whether a device is
1074 * ATA or ATAPI, as per "Signature and persistence" section
1075 * of ATA/PI spec (volume 1, sect 5.14).
1076 *
1077 * LOCKING:
1078 * None.
1079 *
1080 * RETURNS:
633273a3
TH
1081 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1082 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 1083 */
057ace5e 1084unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1085{
1086 /* Apple's open source Darwin code hints that some devices only
1087 * put a proper signature into the LBA mid/high registers,
1088 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1089 *
1090 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1091 * signatures for ATA and ATAPI devices attached on SerialATA,
1092 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1093 * spec has never mentioned about using different signatures
1094 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1095 * Multiplier specification began to use 0x69/0x96 to identify
1096 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1097 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1098 * 0x69/0x96 shortly and described them as reserved for
1099 * SerialATA.
1100 *
1101 * We follow the current spec and consider that 0x69/0x96
1102 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 1103 */
633273a3 1104 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1105 DPRINTK("found ATA device by sig\n");
1106 return ATA_DEV_ATA;
1107 }
1108
633273a3 1109 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1110 DPRINTK("found ATAPI device by sig\n");
1111 return ATA_DEV_ATAPI;
1112 }
1113
633273a3
TH
1114 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1115 DPRINTK("found PMP device by sig\n");
1116 return ATA_DEV_PMP;
1117 }
1118
1119 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 1120 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
1121 return ATA_DEV_SEMB_UNSUP; /* not yet */
1122 }
1123
1da177e4
LT
1124 DPRINTK("unknown device\n");
1125 return ATA_DEV_UNKNOWN;
1126}
1127
1da177e4 1128/**
6a62a04d 1129 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1130 * @id: IDENTIFY DEVICE results we will examine
1131 * @s: string into which data is output
1132 * @ofs: offset into identify device page
1133 * @len: length of string to return. must be an even number.
1134 *
1135 * The strings in the IDENTIFY DEVICE page are broken up into
1136 * 16-bit chunks. Run through the string, and output each
1137 * 8-bit chunk linearly, regardless of platform.
1138 *
1139 * LOCKING:
1140 * caller.
1141 */
1142
6a62a04d
TH
1143void ata_id_string(const u16 *id, unsigned char *s,
1144 unsigned int ofs, unsigned int len)
1da177e4
LT
1145{
1146 unsigned int c;
1147
1148 while (len > 0) {
1149 c = id[ofs] >> 8;
1150 *s = c;
1151 s++;
1152
1153 c = id[ofs] & 0xff;
1154 *s = c;
1155 s++;
1156
1157 ofs++;
1158 len -= 2;
1159 }
1160}
1161
0e949ff3 1162/**
6a62a04d 1163 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1164 * @id: IDENTIFY DEVICE results we will examine
1165 * @s: string into which data is output
1166 * @ofs: offset into identify device page
1167 * @len: length of string to return. must be an odd number.
1168 *
6a62a04d 1169 * This function is identical to ata_id_string except that it
0e949ff3
TH
1170 * trims trailing spaces and terminates the resulting string with
1171 * null. @len must be actual maximum length (even number) + 1.
1172 *
1173 * LOCKING:
1174 * caller.
1175 */
6a62a04d
TH
1176void ata_id_c_string(const u16 *id, unsigned char *s,
1177 unsigned int ofs, unsigned int len)
0e949ff3
TH
1178{
1179 unsigned char *p;
1180
1181 WARN_ON(!(len & 1));
1182
6a62a04d 1183 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1184
1185 p = s + strnlen(s, len - 1);
1186 while (p > s && p[-1] == ' ')
1187 p--;
1188 *p = '\0';
1189}
0baab86b 1190
db6f8759
TH
1191static u64 ata_id_n_sectors(const u16 *id)
1192{
1193 if (ata_id_has_lba(id)) {
1194 if (ata_id_has_lba48(id))
1195 return ata_id_u64(id, 100);
1196 else
1197 return ata_id_u32(id, 60);
1198 } else {
1199 if (ata_id_current_chs_valid(id))
1200 return ata_id_u32(id, 57);
1201 else
1202 return id[1] * id[3] * id[6];
1203 }
1204}
1205
a5987e0a 1206u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1e999736
AC
1207{
1208 u64 sectors = 0;
1209
1210 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1211 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1212 sectors |= (tf->hob_lbal & 0xff) << 24;
1213 sectors |= (tf->lbah & 0xff) << 16;
1214 sectors |= (tf->lbam & 0xff) << 8;
1215 sectors |= (tf->lbal & 0xff);
1216
a5987e0a 1217 return sectors;
1e999736
AC
1218}
1219
a5987e0a 1220u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1e999736
AC
1221{
1222 u64 sectors = 0;
1223
1224 sectors |= (tf->device & 0x0f) << 24;
1225 sectors |= (tf->lbah & 0xff) << 16;
1226 sectors |= (tf->lbam & 0xff) << 8;
1227 sectors |= (tf->lbal & 0xff);
1228
a5987e0a 1229 return sectors;
1e999736
AC
1230}
1231
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	/* pick the EXT variant of the command when the drive does LBA48 */
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* ABORTED from the device itself maps to -EACCES so the
		 * caller can distinguish "drive said no" from I/O failure */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result registers hold the max *address*; +1 turns it into a count */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	/* quirk: some drives report one sector too many */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1281
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the highest addressable sector, i.e. count - 1 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* bits 47:24 go into the hob (previous-content) registers */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 live in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* ABORTED or IDNF from the device means it refused the
		 * resize (e.g. a prior non-volatile SET MAX) */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1338
/**
 *	ata_hpa_resize	-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	/* only chatter during the initial EH probe pass */
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			/* remember the quirk so we never retry on this device */
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data: capacity words changed after the resize */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1434
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: placeholder for controllers that need
	 * no explicit device-select step */
}
1450
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* first batch: words 49-75 (capability/validity/mode words --
	 * exact meanings per the ATA spec word layout) */
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* second batch: words 80-84 (version/command-set words) */
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* words 88 and 93 (UDMA modes / hardware reset result) */
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1489
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		/* word 64 advertises modes 3/4; shift past the always
		 * supported PIO0-2 and fill those in unconditionally */
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* UDMA modes (word 88) are only valid when word 53 bit 2 is set */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1558
/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data for the workqueue function to use
 *	@delay: delay time in jiffies for workqueue function
 *
 *	Schedule the port_task work for execution after @delay jiffies
 *	using port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1585
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancels a pending run and waits for an in-flight one */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}
1605
7102d230 1606static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1607{
77853bf2 1608 struct completion *waiting = qc->private_data;
a2a7a662 1609
a2a7a662 1610 complete(waiting);
a2a7a662
TH
1611}
1612
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save and neutralize the port/link command-tracking state so
	 * the internal command can run alone; restored on completion. */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the generic OTHER bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1794
2432697b 1795/**
33480a0e 1796 * ata_exec_internal - execute libata internal command
2432697b
TH
1797 * @dev: Device to which the command is sent
1798 * @tf: Taskfile registers for the command and the result
1799 * @cdb: CDB for packet command
1800 * @dma_dir: Data tranfer direction of the command
1801 * @buf: Data buffer of the command
1802 * @buflen: Length of data buffer
2b789108 1803 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1804 *
1805 * Wrapper around ata_exec_internal_sg() which takes simple
1806 * buffer instead of sg list.
1807 *
1808 * LOCKING:
1809 * None. Should be called with kernel context, might sleep.
1810 *
1811 * RETURNS:
1812 * Zero on success, AC_ERR_* mask on failure
1813 */
1814unsigned ata_exec_internal(struct ata_device *dev,
1815 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1816 int dma_dir, void *buf, unsigned int buflen,
1817 unsigned long timeout)
2432697b 1818{
33480a0e
TH
1819 struct scatterlist *psg = NULL, sg;
1820 unsigned int n_elem = 0;
2432697b 1821
33480a0e
TH
1822 if (dma_dir != DMA_NONE) {
1823 WARN_ON(!buf);
1824 sg_init_one(&sg, buf, buflen);
1825 psg = &sg;
1826 n_elem++;
1827 }
2432697b 1828
2b789108
TH
1829 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1830 timeout);
2432697b
TH
1831}
1832
977e6b9f
TH
1833/**
1834 * ata_do_simple_cmd - execute simple internal command
1835 * @dev: Device to which the command is sent
1836 * @cmd: Opcode to execute
1837 *
1838 * Execute a 'simple' command, that only consists of the opcode
1839 * 'cmd' itself, without filling any other registers
1840 *
1841 * LOCKING:
1842 * Kernel thread context (may sleep).
1843 *
1844 * RETURNS:
1845 * Zero on success, AC_ERR_* mask on failure
e58eb583 1846 */
77b08fb5 1847unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1848{
1849 struct ata_taskfile tf;
e58eb583
TH
1850
1851 ata_tf_init(dev, &tf);
1852
1853 tf.command = cmd;
1854 tf.flags |= ATA_TFLAG_DEVICE;
1855 tf.protocol = ATA_PROT_NODATA;
1856
2b789108 1857 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1858}
1859
1bc4ccff
AC
1860/**
1861 * ata_pio_need_iordy - check if iordy needed
1862 * @adev: ATA device
1863 *
1864 * Check if the current speed of the device requires IORDY. Used
1865 * by various controllers for chip configuration.
1866 */
a617c09f 1867
1bc4ccff
AC
1868unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1869{
432729f0
AC
1870 /* Controller doesn't support IORDY. Probably a pointless check
1871 as the caller should know this */
9af5c9c9 1872 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1873 return 0;
432729f0
AC
1874 /* PIO3 and higher it is mandatory */
1875 if (adev->pio_mode > XFER_PIO_2)
1876 return 1;
1877 /* We turn it on when possible */
1878 if (ata_id_has_iordy(adev->id))
1bc4ccff 1879 return 1;
432729f0
AC
1880 return 0;
1881}
2e9edbf8 1882
432729f0
AC
1883/**
1884 * ata_pio_mask_no_iordy - Return the non IORDY mask
1885 * @adev: ATA device
1886 *
1887 * Compute the highest mode possible if we are not using iordy. Return
1888 * -1 if no iordy mode is available.
1889 */
a617c09f 1890
432729f0
AC
1891static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1892{
1bc4ccff 1893 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1894 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1895 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1896 /* Is the speed faster than the drive allows non IORDY ? */
1897 if (pio) {
1898 /* This is cycle times not frequency - watch the logic! */
1899 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1900 return 3 << ATA_SHIFT_PIO;
1901 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1902 }
1903 }
432729f0 1904 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1905}
1906
1da177e4 1907/**
49016aca 1908 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1909 * @dev: target device
1910 * @p_class: pointer to class of the target device (may be changed)
bff04647 1911 * @flags: ATA_READID_* flags
fe635c7e 1912 * @id: buffer to read IDENTIFY data into
1da177e4 1913 *
49016aca
TH
1914 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1915 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1916 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1917 * for pre-ATA4 drives.
1da177e4 1918 *
50a99018 1919 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1920 * now we abort if we hit that case.
50a99018 1921 *
1da177e4 1922 * LOCKING:
49016aca
TH
1923 * Kernel thread context (may sleep)
1924 *
1925 * RETURNS:
1926 * 0 on success, -errno otherwise.
1da177e4 1927 */
a9beec95 1928int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1929 unsigned int flags, u16 *id)
1da177e4 1930{
9af5c9c9 1931 struct ata_port *ap = dev->link->ap;
49016aca 1932 unsigned int class = *p_class;
a0123703 1933 struct ata_taskfile tf;
49016aca
TH
1934 unsigned int err_mask = 0;
1935 const char *reason;
54936f8b 1936 int may_fallback = 1, tried_spinup = 0;
49016aca 1937 int rc;
1da177e4 1938
0dd4b21f 1939 if (ata_msg_ctl(ap))
7f5e4e8d 1940 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1da177e4 1941
49016aca 1942 retry:
3373efd8 1943 ata_tf_init(dev, &tf);
a0123703 1944
49016aca
TH
1945 switch (class) {
1946 case ATA_DEV_ATA:
a0123703 1947 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1948 break;
1949 case ATA_DEV_ATAPI:
a0123703 1950 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1951 break;
1952 default:
1953 rc = -ENODEV;
1954 reason = "unsupported class";
1955 goto err_out;
1da177e4
LT
1956 }
1957
a0123703 1958 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1959
1960 /* Some devices choke if TF registers contain garbage. Make
1961 * sure those are properly initialized.
1962 */
1963 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1964
1965 /* Device presence detection is unreliable on some
1966 * controllers. Always poll IDENTIFY if available.
1967 */
1968 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1969
3373efd8 1970 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 1971 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 1972 if (err_mask) {
800b3996 1973 if (err_mask & AC_ERR_NODEV_HINT) {
1ffc151f
TH
1974 ata_dev_printk(dev, KERN_DEBUG,
1975 "NODEV after polling detection\n");
55a8e2c8
TH
1976 return -ENOENT;
1977 }
1978
1ffc151f
TH
1979 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1980 /* Device or controller might have reported
1981 * the wrong device class. Give a shot at the
1982 * other IDENTIFY if the current one is
1983 * aborted by the device.
1984 */
1985 if (may_fallback) {
1986 may_fallback = 0;
1987
1988 if (class == ATA_DEV_ATA)
1989 class = ATA_DEV_ATAPI;
1990 else
1991 class = ATA_DEV_ATA;
1992 goto retry;
1993 }
1994
1995 /* Control reaches here iff the device aborted
1996 * both flavors of IDENTIFYs which happens
1997 * sometimes with phantom devices.
1998 */
1999 ata_dev_printk(dev, KERN_DEBUG,
2000 "both IDENTIFYs aborted, assuming NODEV\n");
2001 return -ENOENT;
54936f8b
TH
2002 }
2003
49016aca
TH
2004 rc = -EIO;
2005 reason = "I/O error";
1da177e4
LT
2006 goto err_out;
2007 }
2008
54936f8b
TH
2009 /* Falling back doesn't make sense if ID data was read
2010 * successfully at least once.
2011 */
2012 may_fallback = 0;
2013
49016aca 2014 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 2015
49016aca 2016 /* sanity check */
a4f5749b 2017 rc = -EINVAL;
6070068b 2018 reason = "device reports invalid type";
a4f5749b
TH
2019
2020 if (class == ATA_DEV_ATA) {
2021 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2022 goto err_out;
2023 } else {
2024 if (ata_id_is_ata(id))
2025 goto err_out;
49016aca
TH
2026 }
2027
169439c2
ML
2028 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2029 tried_spinup = 1;
2030 /*
2031 * Drive powered-up in standby mode, and requires a specific
2032 * SET_FEATURES spin-up subcommand before it will accept
2033 * anything other than the original IDENTIFY command.
2034 */
218f3d30 2035 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 2036 if (err_mask && id[2] != 0x738c) {
169439c2
ML
2037 rc = -EIO;
2038 reason = "SPINUP failed";
2039 goto err_out;
2040 }
2041 /*
2042 * If the drive initially returned incomplete IDENTIFY info,
2043 * we now must reissue the IDENTIFY command.
2044 */
2045 if (id[2] == 0x37c8)
2046 goto retry;
2047 }
2048
bff04647 2049 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
2050 /*
2051 * The exact sequence expected by certain pre-ATA4 drives is:
2052 * SRST RESET
50a99018
AC
2053 * IDENTIFY (optional in early ATA)
2054 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
2055 * anything else..
2056 * Some drives were very specific about that exact sequence.
50a99018
AC
2057 *
2058 * Note that ATA4 says lba is mandatory so the second check
 2059 * should never trigger.
49016aca
TH
2060 */
2061 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2062 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2063 if (err_mask) {
2064 rc = -EIO;
2065 reason = "INIT_DEV_PARAMS failed";
2066 goto err_out;
2067 }
2068
2069 /* current CHS translation info (id[53-58]) might be
2070 * changed. reread the identify device info.
2071 */
bff04647 2072 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2073 goto retry;
2074 }
2075 }
2076
2077 *p_class = class;
fe635c7e 2078
49016aca
TH
2079 return 0;
2080
2081 err_out:
88574551 2082 if (ata_msg_warn(ap))
0dd4b21f 2083 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2084 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2085 return rc;
2086}
2087
3373efd8 2088static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2089{
9af5c9c9
TH
2090 struct ata_port *ap = dev->link->ap;
2091 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2092}
2093
a6e6ce8e
TH
2094static void ata_dev_config_ncq(struct ata_device *dev,
2095 char *desc, size_t desc_sz)
2096{
9af5c9c9 2097 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2098 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2099
2100 if (!ata_id_has_ncq(dev->id)) {
2101 desc[0] = '\0';
2102 return;
2103 }
75683fe7 2104 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2105 snprintf(desc, desc_sz, "NCQ (not used)");
2106 return;
2107 }
a6e6ce8e 2108 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2109 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2110 dev->flags |= ATA_DFLAG_NCQ;
2111 }
2112
2113 if (hdepth >= ddepth)
2114 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2115 else
2116 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2117}
2118
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* nothing to do for a disabled device */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage (blacklist quirks + user-forced quirks) */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* word 59 bit 8 = multi-sector setting valid */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH EXT only matters past the 28-bit
				 * LBA boundary */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* link power management capabilities, unless quirked off */
	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLD a chance to apply controller specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}
2410
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 unconditionally.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 unconditionally.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_IGN unconditionally.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}

/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2472
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* each device gets ATA_PROBE_MAX_TRIES attempts before being
	 * disabled; the "fail:" path below decrements the counters
	 */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev_reverse(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* at least one enabled device means success */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev is the device which failed; charge one try against it */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2621
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled, by clearing
 *	ATA_FLAG_DISABLED.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2637
3be680b7
TH
2638/**
2639 * sata_print_link_status - Print SATA link status
936fd732 2640 * @link: SATA link to printk link status about
3be680b7
TH
2641 *
2642 * This function prints link speed and status of a SATA link.
2643 *
2644 * LOCKING:
2645 * None.
2646 */
936fd732 2647void sata_print_link_status(struct ata_link *link)
3be680b7 2648{
6d5f9732 2649 u32 sstatus, scontrol, tmp;
3be680b7 2650
936fd732 2651 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2652 return;
936fd732 2653 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2654
936fd732 2655 if (ata_link_online(link)) {
3be680b7 2656 tmp = (sstatus >> 4) & 0xf;
936fd732 2657 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2658 "SATA link up %s (SStatus %X SControl %X)\n",
2659 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2660 } else {
936fd732 2661 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2662 "SATA link down (SStatus %X SControl %X)\n",
2663 sstatus, scontrol);
3be680b7
TH
2664 }
2665}
2666
ebdfca6e
AC
2667/**
2668 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2669 * @adev: device
2670 *
2671 * Obtain the other device on the same cable, or if none is
2672 * present NULL is returned
2673 */
2e9edbf8 2674
3373efd8 2675struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2676{
9af5c9c9
TH
2677 struct ata_link *link = adev->link;
2678 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2679 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2680 return NULL;
2681 return pair;
2682}
2683
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	/* mark both possible devices absent and flag the port */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2703
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	/* mask is a bitmap of allowed speeds; bit (spd - 1) = spd allowed */
	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2762
936fd732 2763static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2764{
5270222f
TH
2765 struct ata_link *host_link = &link->ap->link;
2766 u32 limit, target, spd;
1c3fae4d 2767
5270222f
TH
2768 limit = link->sata_spd_limit;
2769
2770 /* Don't configure downstream link faster than upstream link.
2771 * It doesn't speed up anything and some PMPs choke on such
2772 * configuration.
2773 */
2774 if (!ata_is_host_link(link) && host_link->sata_spd)
2775 limit &= (1 << host_link->sata_spd) - 1;
2776
2777 if (limit == UINT_MAX)
2778 target = 0;
1c3fae4d 2779 else
5270222f 2780 target = fls(limit);
1c3fae4d
TH
2781
2782 spd = (*scontrol >> 4) & 0xf;
5270222f 2783 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2784
5270222f 2785 return spd != target;
1c3fae4d
TH
2786}
2787
2788/**
3c567b7d 2789 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2790 * @link: Link in question
1c3fae4d
TH
2791 *
2792 * Test whether the spd limit in SControl matches
936fd732 2793 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2794 * whether hardreset is necessary to apply SATA spd
2795 * configuration.
2796 *
2797 * LOCKING:
2798 * Inherited from caller.
2799 *
2800 * RETURNS:
2801 * 1 if SATA spd configuration is needed, 0 otherwise.
2802 */
936fd732 2803int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2804{
2805 u32 scontrol;
2806
936fd732 2807 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2808 return 1;
1c3fae4d 2809
936fd732 2810 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2811}
2812
2813/**
3c567b7d 2814 * sata_set_spd - set SATA spd according to spd limit
936fd732 2815 * @link: Link to set SATA spd for
1c3fae4d 2816 *
936fd732 2817 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2818 *
2819 * LOCKING:
2820 * Inherited from caller.
2821 *
2822 * RETURNS:
2823 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2824 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2825 */
936fd732 2826int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2827{
2828 u32 scontrol;
81952c54 2829 int rc;
1c3fae4d 2830
936fd732 2831 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2832 return rc;
1c3fae4d 2833
936fd732 2834 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2835 return 0;
2836
936fd732 2837 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2838 return rc;
2839
1c3fae4d
TH
2840 return 1;
2841}
2842
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 *
 * Columns: mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma.
 * The table is sorted by ascending mode value; ata_timing_find_mode()
 * relies on that ordering and on the 0xFF sentinel terminating it.
 */
static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },

	{ 0xFF }
};
2886
/* Round @v up to the next whole multiple of @unit (ceiling division). */
#define ENOUGH(v, unit)	(((v)-1)/(unit)+1)
/* Like ENOUGH() but maps 0 to 0 ("timing not specified").
 * NOTE: evaluates @v twice -- only pass side-effect-free expressions.
 */
#define EZ(v, unit)	((v)?ENOUGH(v, unit):0)

/* Convert timing @t to bus clock counts in @q (@q may alias @t), using
 * clock period @T for PIO/DMA fields and @UT for the UDMA field.
 * The "* 1000" suggests timings in ns against periods in ps --
 * NOTE(review): confirm units against callers.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
2901
/* Merge timings @a and @b into @m, taking the slower (larger) value of
 * each field selected by the ATA_TIMING_* bits in @what.  @m may alias
 * @a or @b.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
}
2914
6357357c 2915const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 2916{
70cd071e
TH
2917 const struct ata_timing *t = ata_timing;
2918
2919 while (xfer_mode > t->mode)
2920 t++;
452503f9 2921
70cd071e
TH
2922 if (xfer_mode == t->mode)
2923 return t;
2924 return NULL;
452503f9
AC
2925}
2926
/* Compute the quantized bus timing for @adev at transfer mode @speed
 * into @t, using clock periods @T (PIO/DMA) and @UT (UDMA).  Applies
 * EIDE drive-reported minimum cycle times and clamps DMA timings so
 * they are never faster than the drive's PIO timing.
 *
 * Returns 0 on success, -EINVAL if @speed is not a known mode.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 * (Recurses once with the device's PIO mode and merges.)
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2997
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base (slowest) mode for this transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk upwards through the timing table while the mode still
	 * belongs to @xfer_shift and its cycle fits within @cycle;
	 * the last mode that fits wins
	 */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3048
cf176e1a
TH
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET may be ORed into the selector; strip it first */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* knock off the highest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* knock off the highest DMA mode, UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to modes usable on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also drops all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to drop below PIO entirely or to "limit" to the
	 * current mask (no change = nothing to retry with)
	 */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3137
/* ata_dev_set_mode - issue SET FEATURES - XFER MODE and verify the result
 *
 * Issues SET FEATURES - XFER MODE for @dev's currently selected
 * xfer_shift/xfer_mode, then revalidates the device.  A device error
 * from the command itself is ignored for several classes of devices
 * that are known to reject the command while still operating
 * correctly (see the ign_dev_err cases below).
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask;
	int rc;

	/* keep the PIO flag in sync with the selected transfer type */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* any failure other than a device-reported error is fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		ign_dev_err = 1;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		ign_dev_err = 1;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3204
1da177e4 3205/**
04351821 3206 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3207 * @link: link on which timings will be programmed
1967b7ff 3208 * @r_failed_dev: out parameter for failed device
1da177e4 3209 *
04351821
AC
3210 * Standard implementation of the function used to tune and set
3211 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3212 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3213 * returned in @r_failed_dev.
780a87f7 3214 *
1da177e4 3215 * LOCKING:
0cba632b 3216 * PCI/etc. bus probe sem.
e82cbdb9
TH
3217 *
3218 * RETURNS:
3219 * 0 on success, negative errno otherwise
1da177e4 3220 */
04351821 3221
0260731f 3222int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3223{
0260731f 3224 struct ata_port *ap = link->ap;
e8e0619f 3225 struct ata_device *dev;
f58229f8 3226 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3227
a6d5a51c 3228 /* step 1: calculate xfer_mask */
f58229f8 3229 ata_link_for_each_dev(dev, link) {
7dc951ae 3230 unsigned long pio_mask, dma_mask;
b3a70601 3231 unsigned int mode_mask;
a6d5a51c 3232
e1211e3f 3233 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3234 continue;
3235
b3a70601
AC
3236 mode_mask = ATA_DMA_MASK_ATA;
3237 if (dev->class == ATA_DEV_ATAPI)
3238 mode_mask = ATA_DMA_MASK_ATAPI;
3239 else if (ata_id_is_cfa(dev->id))
3240 mode_mask = ATA_DMA_MASK_CFA;
3241
3373efd8 3242 ata_dev_xfermask(dev);
33267325 3243 ata_force_xfermask(dev);
1da177e4 3244
acf356b1
TH
3245 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3246 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3247
3248 if (libata_dma_mask & mode_mask)
3249 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3250 else
3251 dma_mask = 0;
3252
acf356b1
TH
3253 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3254 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3255
4f65977d 3256 found = 1;
70cd071e 3257 if (dev->dma_mode != 0xff)
5444a6f4 3258 used_dma = 1;
a6d5a51c 3259 }
4f65977d 3260 if (!found)
e82cbdb9 3261 goto out;
a6d5a51c
TH
3262
3263 /* step 2: always set host PIO timings */
f58229f8 3264 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3265 if (!ata_dev_enabled(dev))
3266 continue;
3267
70cd071e 3268 if (dev->pio_mode == 0xff) {
f15a1daf 3269 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3270 rc = -EINVAL;
e82cbdb9 3271 goto out;
e8e0619f
TH
3272 }
3273
3274 dev->xfer_mode = dev->pio_mode;
3275 dev->xfer_shift = ATA_SHIFT_PIO;
3276 if (ap->ops->set_piomode)
3277 ap->ops->set_piomode(ap, dev);
3278 }
1da177e4 3279
a6d5a51c 3280 /* step 3: set host DMA timings */
f58229f8 3281 ata_link_for_each_dev(dev, link) {
70cd071e 3282 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3283 continue;
3284
3285 dev->xfer_mode = dev->dma_mode;
3286 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3287 if (ap->ops->set_dmamode)
3288 ap->ops->set_dmamode(ap, dev);
3289 }
1da177e4
LT
3290
3291 /* step 4: update devices' xfer mode */
f58229f8 3292 ata_link_for_each_dev(dev, link) {
18d90deb 3293 /* don't update suspended devices' xfer mode */
9666f400 3294 if (!ata_dev_enabled(dev))
83206a29
TH
3295 continue;
3296
3373efd8 3297 rc = ata_dev_set_mode(dev);
5bbc53f4 3298 if (rc)
e82cbdb9 3299 goto out;
83206a29 3300 }
1da177e4 3301
e8e0619f
TH
3302 /* Record simplex status. If we selected DMA then the other
3303 * host channels are not permitted to do so.
5444a6f4 3304 */
cca3974e 3305 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3306 ap->host->simplex_claimed = ap;
5444a6f4 3307
e82cbdb9
TH
3308 out:
3309 if (rc)
3310 *r_failed_dev = dev;
3311 return rc;
1da177e4
LT
3312}
3313
aa2731ad
TH
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT;
	int warned = 0;

	/* the -ENODEV grace period can't extend past the overall deadline */
	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		/* tmp keeps the raw result for the warning printout below */
		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/* -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  For example,
		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
		 * GoVault needs even more than that.  Wait for
		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if we've been at it for 5s with >3s still to go */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_printk(link, KERN_WARNING,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		msleep(50);
	}
}
3388
3389/**
3390 * ata_wait_after_reset - wait for link to become ready after reset
3391 * @link: link to be waited on
3392 * @deadline: deadline jiffies for the operation
3393 * @check_ready: callback to check link readiness
3394 *
3395 * Wait for @link to become ready after reset.
3396 *
3397 * LOCKING:
3398 * EH context.
3399 *
3400 * RETURNS:
3401 * 0 if @linke is ready before @deadline; otherwise, -errno.
3402 */
3403extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3404 int (*check_ready)(struct ata_link *link))
3405{
3406 msleep(ATA_WAIT_AFTER_RESET_MSECS);
3407
3408 return ata_wait_ready(link, deadline, check_ready);
3409}
3410
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* cap the overall timeout with the caller supplied @deadline */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the low nibble (DET field) is compared */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 may be stuck; keep polling until deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3479
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* keep SPD, clear DET, set 0x300 -- NOTE(review): presumably
	 * disables IPM transitions per the SControl layout; confirm
	 */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* Clear SError.  PMP and some host PHYs require this to
	 * operate and clearing should be done before checking PHY
	 * online status to avoid race condition (hotplugging between
	 * link resume and status check).
	 */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);
	if (rc == 0 || rc == -EINVAL) {
		unsigned long flags;

		spin_lock_irqsave(link->ap->lock, flags);
		link->eh_info.serror = 0;
		spin_unlock_irqrestore(link->ap->lock, flags);
		rc = 0;
	}
	return rc;
}
3533
f5914a46 3534/**
0aa1113d 3535 * ata_std_prereset - prepare for reset
cc0680a5 3536 * @link: ATA link to be reset
d4b2bab4 3537 * @deadline: deadline jiffies for the operation
f5914a46 3538 *
cc0680a5 3539 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3540 * prereset makes libata abort whole reset sequence and give up
3541 * that port, so prereset should be best-effort. It does its
3542 * best to prepare for reset sequence but if things go wrong, it
3543 * should just whine, not fail.
f5914a46
TH
3544 *
3545 * LOCKING:
3546 * Kernel thread context (may sleep)
3547 *
3548 * RETURNS:
3549 * 0 on success, -errno otherwise.
3550 */
0aa1113d 3551int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3552{
cc0680a5 3553 struct ata_port *ap = link->ap;
936fd732 3554 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3555 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3556 int rc;
3557
f5914a46
TH
3558 /* if we're about to do hardreset, nothing more to do */
3559 if (ehc->i.action & ATA_EH_HARDRESET)
3560 return 0;
3561
936fd732 3562 /* if SATA, resume link */
a16abc0b 3563 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3564 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3565 /* whine about phy resume failure but proceed */
3566 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3567 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3568 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3569 }
3570
f5914a46
TH
3571 return 0;
3572}
3573
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait itself after this
 *	function returns.  Device classification is LLD's
 *	responsibility.
 *
 *	*@online is set to one iff reset succeeded and @link is online
 *	after reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	/* pessimistic default; flipped to true once the link proves online */
	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if ((link->ap->flags & ATA_FLAG_PMP) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT;
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		/* -EAGAIN tells EH a follow-up softreset is required */
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN)
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3681
57c9efdf
TH
3682/**
3683 * sata_std_hardreset - COMRESET w/o waiting or classification
3684 * @link: link to reset
3685 * @class: resulting class of attached device
3686 * @deadline: deadline jiffies for the operation
3687 *
3688 * Standard SATA COMRESET w/o waiting or classification.
3689 *
3690 * LOCKING:
3691 * Kernel thread context (may sleep)
3692 *
3693 * RETURNS:
3694 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3695 */
3696int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3697 unsigned long deadline)
3698{
3699 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3700 bool online;
3701 int rc;
3702
3703 /* do hardreset */
3704 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
57c9efdf
TH
3705 return online ? -EAGAIN : rc;
3706}
3707
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	/* @classes is part of the postreset callback signature but is
	 * not consulted here
	 */
	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}
3729
623a3128
TH
3730/**
3731 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3732 * @dev: device to compare against
3733 * @new_class: class of the new device
3734 * @new_id: IDENTIFY page of the new device
3735 *
3736 * Compare @new_class and @new_id against @dev and determine
3737 * whether @dev is the device indicated by @new_class and
3738 * @new_id.
3739 *
3740 * LOCKING:
3741 * None.
3742 *
3743 * RETURNS:
3744 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3745 */
3373efd8
TH
3746static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3747 const u16 *new_id)
623a3128
TH
3748{
3749 const u16 *old_id = dev->id;
a0cf733b
TH
3750 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3751 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3752
3753 if (dev->class != new_class) {
f15a1daf
TH
3754 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3755 dev->class, new_class);
623a3128
TH
3756 return 0;
3757 }
3758
a0cf733b
TH
3759 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3760 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3761 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3762 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3763
3764 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3765 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3766 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3767 return 0;
3768 }
3769
3770 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3771 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3772 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3773 return 0;
3774 }
3775
623a3128
TH
3776 return 1;
3777}
3778
3779/**
fe30911b 3780 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3781 * @dev: target ATA device
bff04647 3782 * @readid_flags: read ID flags
623a3128
TH
3783 *
3784 * Re-read IDENTIFY page and make sure @dev is still attached to
3785 * the port.
3786 *
3787 * LOCKING:
3788 * Kernel thread context (may sleep)
3789 *
3790 * RETURNS:
3791 * 0 on success, negative errno otherwise
3792 */
fe30911b 3793int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3794{
5eb45c02 3795 unsigned int class = dev->class;
9af5c9c9 3796 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3797 int rc;
3798
fe635c7e 3799 /* read ID data */
bff04647 3800 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3801 if (rc)
fe30911b 3802 return rc;
623a3128
TH
3803
3804 /* is the device still there? */
fe30911b
TH
3805 if (!ata_dev_same_device(dev, class, id))
3806 return -ENODEV;
623a3128 3807
fe635c7e 3808 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3809 return 0;
3810}
3811
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed -- a changed size is treated
	 * as a different device (NOTE(review): presumably guards
	 * against a swapped drive or changed HPA; confirm)
	 */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3876
6919a0a6
AC
/* Per-model quirk list: a device whose IDENTIFY model (and optionally
 * firmware revision) matches an entry gets the entry's horkage flags
 * applied.  Matching is done by ata_dev_blacklisted() using
 * strn_pattern_cmp(), so a trailing '*' makes an entry a prefix match.
 */
struct ata_blacklist_entry {
	const char *model_num;	/* model string, trailing '*' = prefix match */
	const char *model_rev;	/* firmware revision, NULL = match any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply */
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },

	/* End Marker */
	{ }
};
2e9edbf8 3963
741b7763 3964static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
539cc7c7
JG
3965{
3966 const char *p;
3967 int len;
3968
3969 /*
3970 * check for trailing wildcard: *\0
3971 */
3972 p = strchr(patt, wildchar);
3973 if (p && ((*(p + 1)) == 0))
3974 len = p - patt;
317b50b8 3975 else {
539cc7c7 3976 len = strlen(name);
317b50b8
AP
3977 if (!len) {
3978 if (!*patt)
3979 return 0;
3980 return -1;
3981 }
3982 }
539cc7c7
JG
3983
3984 return strncmp(patt, name, len);
3985}
3986
75683fe7 3987static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 3988{
8bfa79fc
TH
3989 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3990 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3991 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3992
8bfa79fc
TH
3993 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3994 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3995
6919a0a6 3996 while (ad->model_num) {
539cc7c7 3997 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
3998 if (ad->model_rev == NULL)
3999 return ad->horkage;
539cc7c7 4000 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4001 return ad->horkage;
f4b15fef 4002 }
6919a0a6 4003 ad++;
f4b15fef 4004 }
1da177e4
LT
4005 return 0;
4006}
4007
6919a0a6
AC
4008static int ata_dma_blacklisted(const struct ata_device *dev)
4009{
4010 /* We don't support polling DMA.
4011 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4012 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4013 */
9af5c9c9 4014 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4015 (dev->flags & ATA_DFLAG_CDB_INTR))
4016 return 1;
75683fe7 4017 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4018}
4019
6bbfd53d
AC
4020/**
4021 * ata_is_40wire - check drive side detection
4022 * @dev: device
4023 *
4024 * Perform drive side detection decoding, allowing for device vendors
4025 * who can't follow the documentation.
4026 */
4027
4028static int ata_is_40wire(struct ata_device *dev)
4029{
4030 if (dev->horkage & ATA_HORKAGE_IVB)
4031 return ata_drive_40wire_relaxed(dev->id);
4032 return ata_drive_40wire(dev->id);
4033}
4034
15a5551c
AC
4035/**
4036 * cable_is_40wire - 40/80/SATA decider
4037 * @ap: port to consider
4038 *
4039 * This function encapsulates the policy for speed management
4040 * in one place. At the moment we don't cache the result but
4041 * there is a good case for setting ap->cbl to the result when
4042 * we are called with unknown cables (and figuring out if it
4043 * impacts hotplug at all).
4044 *
4045 * Return 1 if the cable appears to be 40 wire.
4046 */
4047
4048static int cable_is_40wire(struct ata_port *ap)
4049{
4050 struct ata_link *link;
4051 struct ata_device *dev;
4052
4053 /* If the controller thinks we are 40 wire, we are */
4054 if (ap->cbl == ATA_CBL_PATA40)
4055 return 1;
4056 /* If the controller thinks we are 80 wire, we are */
4057 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4058 return 0;
4059 /* If the controller doesn't know we scan
4060
4061 - Note: We look for all 40 wire detects at this point.
4062 Any 80 wire detect is taken to be 80 wire cable
4063 because
4064 - In many setups only the one drive (slave if present)
4065 will give a valid detect
4066 - If you have a non detect capable drive you don't
4067 want it to colour the choice
4068 */
4069 ata_port_for_each_link(link, ap) {
4070 ata_link_for_each_dev(dev, link) {
4071 if (!ata_is_40wire(dev))
4072 return 0;
4073 }
4074 }
4075 return 1;
4076}
4077
a6d5a51c
TH
4078/**
4079 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4080 * @dev: Device to compute xfermask for
4081 *
acf356b1
TH
4082 * Compute supported xfermask of @dev and store it in
4083 * dev->*_mask. This function is responsible for applying all
4084 * known limits including host controller limits, device
4085 * blacklist, etc...
a6d5a51c
TH
4086 *
4087 * LOCKING:
4088 * None.
a6d5a51c 4089 */
3373efd8 4090static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4091{
9af5c9c9
TH
4092 struct ata_link *link = dev->link;
4093 struct ata_port *ap = link->ap;
cca3974e 4094 struct ata_host *host = ap->host;
a6d5a51c 4095 unsigned long xfer_mask;
1da177e4 4096
37deecb5 4097 /* controller modes available */
565083e1
TH
4098 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4099 ap->mwdma_mask, ap->udma_mask);
4100
8343f889 4101 /* drive modes available */
37deecb5
TH
4102 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4103 dev->mwdma_mask, dev->udma_mask);
4104 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4105
b352e57d
AC
4106 /*
4107 * CFA Advanced TrueIDE timings are not allowed on a shared
4108 * cable
4109 */
4110 if (ata_dev_pair(dev)) {
4111 /* No PIO5 or PIO6 */
4112 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4113 /* No MWDMA3 or MWDMA 4 */
4114 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4115 }
4116
37deecb5
TH
4117 if (ata_dma_blacklisted(dev)) {
4118 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4119 ata_dev_printk(dev, KERN_WARNING,
4120 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4121 }
a6d5a51c 4122
14d66ab7 4123 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4124 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4125 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4126 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4127 "other device, disabling DMA\n");
5444a6f4 4128 }
565083e1 4129
e424675f
JG
4130 if (ap->flags & ATA_FLAG_NO_IORDY)
4131 xfer_mask &= ata_pio_mask_no_iordy(dev);
4132
5444a6f4 4133 if (ap->ops->mode_filter)
a76b62ca 4134 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4135
8343f889
RH
4136 /* Apply cable rule here. Don't apply it early because when
4137 * we handle hot plug the cable type can itself change.
4138 * Check this last so that we know if the transfer rate was
4139 * solely limited by the cable.
4140 * Unknown or 80 wire cables reported host side are checked
4141 * drive side as well. Cases where we know a 40wire cable
4142 * is used safely for 80 are not checked here.
4143 */
4144 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4145 /* UDMA/44 or higher would be available */
15a5551c 4146 if (cable_is_40wire(ap)) {
2dcb407e 4147 ata_dev_printk(dev, KERN_WARNING,
8343f889
RH
4148 "limited to UDMA/33 due to 40-wire cable\n");
4149 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4150 }
4151
565083e1
TH
4152 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4153 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4154}
4155
1da177e4
LT
4156/**
4157 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4158 * @dev: Device to which command will be sent
4159 *
780a87f7
JG
4160 * Issue SET FEATURES - XFER MODE command to device @dev
4161 * on port @ap.
4162 *
1da177e4 4163 * LOCKING:
0cba632b 4164 * PCI/etc. bus probe sem.
83206a29
TH
4165 *
4166 * RETURNS:
4167 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4168 */
4169
3373efd8 4170static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4171{
a0123703 4172 struct ata_taskfile tf;
83206a29 4173 unsigned int err_mask;
1da177e4
LT
4174
4175 /* set up set-features taskfile */
4176 DPRINTK("set features - xfer mode\n");
4177
464cf177
TH
4178 /* Some controllers and ATAPI devices show flaky interrupt
4179 * behavior after setting xfer mode. Use polling instead.
4180 */
3373efd8 4181 ata_tf_init(dev, &tf);
a0123703
TH
4182 tf.command = ATA_CMD_SET_FEATURES;
4183 tf.feature = SETFEATURES_XFER;
464cf177 4184 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4185 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4186 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4187 if (ata_pio_need_iordy(dev))
4188 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4189 /* If the device has IORDY and the controller does not - turn it off */
4190 else if (ata_id_has_iordy(dev->id))
11b7becc 4191 tf.nsect = 0x01;
b9f8ab2d
AC
4192 else /* In the ancient relic department - skip all of this */
4193 return 0;
1da177e4 4194
2b789108 4195 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4196
4197 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4198 return err_mask;
4199}
9f45cbd3 4200/**
218f3d30 4201 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4202 * @dev: Device to which command will be sent
4203 * @enable: Whether to enable or disable the feature
218f3d30 4204 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4205 *
4206 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4207 * on port @ap with sector count
9f45cbd3
KCA
4208 *
4209 * LOCKING:
4210 * PCI/etc. bus probe sem.
4211 *
4212 * RETURNS:
4213 * 0 on success, AC_ERR_* mask otherwise.
4214 */
218f3d30
JG
4215static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4216 u8 feature)
9f45cbd3
KCA
4217{
4218 struct ata_taskfile tf;
4219 unsigned int err_mask;
4220
4221 /* set up set-features taskfile */
4222 DPRINTK("set features - SATA features\n");
4223
4224 ata_tf_init(dev, &tf);
4225 tf.command = ATA_CMD_SET_FEATURES;
4226 tf.feature = enable;
4227 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4228 tf.protocol = ATA_PROT_NODATA;
218f3d30 4229 tf.nsect = feature;
9f45cbd3 4230
2b789108 4231 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4232
83206a29
TH
4233 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4234 return err_mask;
1da177e4
LT
4235}
4236
8bf62ece
AL
4237/**
4238 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4239 * @dev: Device to which command will be sent
e2a7f77a
RD
4240 * @heads: Number of heads (taskfile parameter)
4241 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4242 *
4243 * LOCKING:
6aff8f1f
TH
4244 * Kernel thread context (may sleep)
4245 *
4246 * RETURNS:
4247 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4248 */
3373efd8
TH
4249static unsigned int ata_dev_init_params(struct ata_device *dev,
4250 u16 heads, u16 sectors)
8bf62ece 4251{
a0123703 4252 struct ata_taskfile tf;
6aff8f1f 4253 unsigned int err_mask;
8bf62ece
AL
4254
4255 /* Number of sectors per track 1-255. Number of heads 1-16 */
4256 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4257 return AC_ERR_INVALID;
8bf62ece
AL
4258
4259 /* set up init dev params taskfile */
4260 DPRINTK("init dev params \n");
4261
3373efd8 4262 ata_tf_init(dev, &tf);
a0123703
TH
4263 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4264 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4265 tf.protocol = ATA_PROT_NODATA;
4266 tf.nsect = sectors;
4267 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4268
2b789108 4269 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4270 /* A clean abort indicates an original or just out of spec drive
4271 and we should continue as we issue the setup based on the
4272 drive reported working geometry */
4273 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4274 err_mask = 0;
8bf62ece 4275
6aff8f1f
TH
4276 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4277 return err_mask;
8bf62ece
AL
4278}
4279
1da177e4 4280/**
0cba632b
JG
4281 * ata_sg_clean - Unmap DMA memory associated with command
4282 * @qc: Command containing DMA memory to be released
4283 *
4284 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4285 *
4286 * LOCKING:
cca3974e 4287 * spin_lock_irqsave(host lock)
1da177e4 4288 */
70e6ad0c 4289void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4290{
4291 struct ata_port *ap = qc->ap;
ff2aeb1e 4292 struct scatterlist *sg = qc->sg;
1da177e4
LT
4293 int dir = qc->dma_dir;
4294
a4631474 4295 WARN_ON(sg == NULL);
1da177e4 4296
dde20207 4297 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4298
dde20207
JB
4299 if (qc->n_elem)
4300 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
1da177e4
LT
4301
4302 qc->flags &= ~ATA_QCFLAG_DMAMAP;
ff2aeb1e 4303 qc->sg = NULL;
1da177e4
LT
4304}
4305
1da177e4
LT
4306/**
4307 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4308 * @qc: Metadata associated with taskfile to check
4309 *
780a87f7
JG
4310 * Allow low-level driver to filter ATA PACKET commands, returning
4311 * a status indicating whether or not it is OK to use DMA for the
4312 * supplied PACKET command.
4313 *
1da177e4 4314 * LOCKING:
624d5c51
TH
4315 * spin_lock_irqsave(host lock)
4316 *
4317 * RETURNS: 0 when ATAPI DMA can be used
4318 * nonzero otherwise
4319 */
4320int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4321{
4322 struct ata_port *ap = qc->ap;
71601958 4323
624d5c51
TH
4324 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4325 * few ATAPI devices choke on such DMA requests.
4326 */
4327 if (unlikely(qc->nbytes & 15))
4328 return 1;
e2cec771 4329
624d5c51
TH
4330 if (ap->ops->check_atapi_dma)
4331 return ap->ops->check_atapi_dma(qc);
e2cec771 4332
624d5c51
TH
4333 return 0;
4334}
1da177e4 4335
624d5c51
TH
4336/**
4337 * ata_std_qc_defer - Check whether a qc needs to be deferred
4338 * @qc: ATA command in question
4339 *
4340 * Non-NCQ commands cannot run with any other command, NCQ or
4341 * not. As upper layer only knows the queue depth, we are
4342 * responsible for maintaining exclusion. This function checks
4343 * whether a new command @qc can be issued.
4344 *
4345 * LOCKING:
4346 * spin_lock_irqsave(host lock)
4347 *
4348 * RETURNS:
4349 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4350 */
4351int ata_std_qc_defer(struct ata_queued_cmd *qc)
4352{
4353 struct ata_link *link = qc->dev->link;
e2cec771 4354
624d5c51
TH
4355 if (qc->tf.protocol == ATA_PROT_NCQ) {
4356 if (!ata_tag_valid(link->active_tag))
4357 return 0;
4358 } else {
4359 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4360 return 0;
4361 }
e2cec771 4362
624d5c51
TH
4363 return ATA_DEFER_LINK;
4364}
6912ccd5 4365
624d5c51 4366void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
1da177e4 4367
624d5c51
TH
4368/**
4369 * ata_sg_init - Associate command with scatter-gather table.
4370 * @qc: Command to be associated
4371 * @sg: Scatter-gather table.
4372 * @n_elem: Number of elements in s/g table.
4373 *
4374 * Initialize the data-related elements of queued_cmd @qc
4375 * to point to a scatter-gather table @sg, containing @n_elem
4376 * elements.
4377 *
4378 * LOCKING:
4379 * spin_lock_irqsave(host lock)
4380 */
4381void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4382 unsigned int n_elem)
4383{
4384 qc->sg = sg;
4385 qc->n_elem = n_elem;
4386 qc->cursg = qc->sg;
4387}
bb5cb290 4388
624d5c51
TH
4389/**
4390 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4391 * @qc: Command with scatter-gather table to be mapped.
4392 *
4393 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4394 *
4395 * LOCKING:
4396 * spin_lock_irqsave(host lock)
4397 *
4398 * RETURNS:
4399 * Zero on success, negative on error.
4400 *
4401 */
4402static int ata_sg_setup(struct ata_queued_cmd *qc)
4403{
4404 struct ata_port *ap = qc->ap;
4405 unsigned int n_elem;
1da177e4 4406
624d5c51 4407 VPRINTK("ENTER, ata%u\n", ap->print_id);
e2cec771 4408
624d5c51
TH
4409 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4410 if (n_elem < 1)
4411 return -1;
bb5cb290 4412
624d5c51 4413 DPRINTK("%d sg elements mapped\n", n_elem);
bb5cb290 4414
624d5c51
TH
4415 qc->n_elem = n_elem;
4416 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4 4417
624d5c51 4418 return 0;
1da177e4
LT
4419}
4420
624d5c51
TH
4421/**
4422 * swap_buf_le16 - swap halves of 16-bit words in place
4423 * @buf: Buffer to swap
4424 * @buf_words: Number of 16-bit words in buffer.
4425 *
4426 * Swap halves of 16-bit words if needed to convert from
4427 * little-endian byte order to native cpu byte order, or
4428 * vice-versa.
4429 *
4430 * LOCKING:
4431 * Inherited from caller.
4432 */
4433void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 4434{
624d5c51
TH
4435#ifdef __BIG_ENDIAN
4436 unsigned int i;
8061f5f0 4437
624d5c51
TH
4438 for (i = 0; i < buf_words; i++)
4439 buf[i] = le16_to_cpu(buf[i]);
4440#endif /* __BIG_ENDIAN */
8061f5f0
TH
4441}
4442
1da177e4
LT
4443/**
4444 * ata_qc_new - Request an available ATA command, for queueing
4445 * @ap: Port associated with device @dev
4446 * @dev: Device from whom we request an available command structure
4447 *
4448 * LOCKING:
0cba632b 4449 * None.
1da177e4
LT
4450 */
4451
4452static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4453{
4454 struct ata_queued_cmd *qc = NULL;
4455 unsigned int i;
4456
e3180499 4457 /* no command while frozen */
b51e9e5d 4458 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4459 return NULL;
4460
2ab7db1f
TH
4461 /* the last tag is reserved for internal command. */
4462 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4463 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4464 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4465 break;
4466 }
4467
4468 if (qc)
4469 qc->tag = i;
4470
4471 return qc;
4472}
4473
4474/**
4475 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4476 * @dev: Device from whom we request an available command structure
4477 *
4478 * LOCKING:
0cba632b 4479 * None.
1da177e4
LT
4480 */
4481
3373efd8 4482struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4483{
9af5c9c9 4484 struct ata_port *ap = dev->link->ap;
1da177e4
LT
4485 struct ata_queued_cmd *qc;
4486
4487 qc = ata_qc_new(ap);
4488 if (qc) {
1da177e4
LT
4489 qc->scsicmd = NULL;
4490 qc->ap = ap;
4491 qc->dev = dev;
1da177e4 4492
2c13b7ce 4493 ata_qc_reinit(qc);
1da177e4
LT
4494 }
4495
4496 return qc;
4497}
4498
1da177e4
LT
4499/**
4500 * ata_qc_free - free unused ata_queued_cmd
4501 * @qc: Command to complete
4502 *
4503 * Designed to free unused ata_queued_cmd object
4504 * in case something prevents using it.
4505 *
4506 * LOCKING:
cca3974e 4507 * spin_lock_irqsave(host lock)
1da177e4
LT
4508 */
4509void ata_qc_free(struct ata_queued_cmd *qc)
4510{
4ba946e9
TH
4511 struct ata_port *ap = qc->ap;
4512 unsigned int tag;
4513
a4631474 4514 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4515
4ba946e9
TH
4516 qc->flags = 0;
4517 tag = qc->tag;
4518 if (likely(ata_tag_valid(tag))) {
4ba946e9 4519 qc->tag = ATA_TAG_POISON;
6cec4a39 4520 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4521 }
1da177e4
LT
4522}
4523
76014427 4524void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4525{
dedaf2b0 4526 struct ata_port *ap = qc->ap;
9af5c9c9 4527 struct ata_link *link = qc->dev->link;
dedaf2b0 4528
a4631474
TH
4529 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4530 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4531
4532 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4533 ata_sg_clean(qc);
4534
7401abf2 4535 /* command should be marked inactive atomically with qc completion */
da917d69 4536 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 4537 link->sactive &= ~(1 << qc->tag);
da917d69
TH
4538 if (!link->sactive)
4539 ap->nr_active_links--;
4540 } else {
9af5c9c9 4541 link->active_tag = ATA_TAG_POISON;
da917d69
TH
4542 ap->nr_active_links--;
4543 }
4544
4545 /* clear exclusive status */
4546 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4547 ap->excl_link == link))
4548 ap->excl_link = NULL;
7401abf2 4549
3f3791d3
AL
4550 /* atapi: mark qc as inactive to prevent the interrupt handler
4551 * from completing the command twice later, before the error handler
4552 * is called. (when rc != 0 and atapi request sense is needed)
4553 */
4554 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4555 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4556
1da177e4 4557 /* call completion callback */
77853bf2 4558 qc->complete_fn(qc);
1da177e4
LT
4559}
4560
39599a53
TH
4561static void fill_result_tf(struct ata_queued_cmd *qc)
4562{
4563 struct ata_port *ap = qc->ap;
4564
39599a53 4565 qc->result_tf.flags = qc->tf.flags;
22183bf5 4566 ap->ops->qc_fill_rtf(qc);
39599a53
TH
4567}
4568
00115e0f
TH
4569static void ata_verify_xfer(struct ata_queued_cmd *qc)
4570{
4571 struct ata_device *dev = qc->dev;
4572
4573 if (ata_tag_internal(qc->tag))
4574 return;
4575
4576 if (ata_is_nodata(qc->tf.protocol))
4577 return;
4578
4579 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4580 return;
4581
4582 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4583}
4584
f686bcb8
TH
4585/**
4586 * ata_qc_complete - Complete an active ATA command
4587 * @qc: Command to complete
4588 * @err_mask: ATA Status register contents
4589 *
4590 * Indicate to the mid and upper layers that an ATA
4591 * command has completed, with either an ok or not-ok status.
4592 *
4593 * LOCKING:
cca3974e 4594 * spin_lock_irqsave(host lock)
f686bcb8
TH
4595 */
4596void ata_qc_complete(struct ata_queued_cmd *qc)
4597{
4598 struct ata_port *ap = qc->ap;
4599
4600 /* XXX: New EH and old EH use different mechanisms to
4601 * synchronize EH with regular execution path.
4602 *
4603 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4604 * Normal execution path is responsible for not accessing a
4605 * failed qc. libata core enforces the rule by returning NULL
4606 * from ata_qc_from_tag() for failed qcs.
4607 *
4608 * Old EH depends on ata_qc_complete() nullifying completion
4609 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4610 * not synchronize with interrupt handler. Only PIO task is
4611 * taken care of.
4612 */
4613 if (ap->ops->error_handler) {
4dbfa39b
TH
4614 struct ata_device *dev = qc->dev;
4615 struct ata_eh_info *ehi = &dev->link->eh_info;
4616
b51e9e5d 4617 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4618
4619 if (unlikely(qc->err_mask))
4620 qc->flags |= ATA_QCFLAG_FAILED;
4621
4622 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4623 if (!ata_tag_internal(qc->tag)) {
4624 /* always fill result TF for failed qc */
39599a53 4625 fill_result_tf(qc);
f686bcb8
TH
4626 ata_qc_schedule_eh(qc);
4627 return;
4628 }
4629 }
4630
4631 /* read result TF if requested */
4632 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4633 fill_result_tf(qc);
f686bcb8 4634
4dbfa39b
TH
4635 /* Some commands need post-processing after successful
4636 * completion.
4637 */
4638 switch (qc->tf.command) {
4639 case ATA_CMD_SET_FEATURES:
4640 if (qc->tf.feature != SETFEATURES_WC_ON &&
4641 qc->tf.feature != SETFEATURES_WC_OFF)
4642 break;
4643 /* fall through */
4644 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4645 case ATA_CMD_SET_MULTI: /* multi_count changed */
4646 /* revalidate device */
4647 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4648 ata_port_schedule_eh(ap);
4649 break;
054a5fba
TH
4650
4651 case ATA_CMD_SLEEP:
4652 dev->flags |= ATA_DFLAG_SLEEPING;
4653 break;
4dbfa39b
TH
4654 }
4655
00115e0f
TH
4656 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4657 ata_verify_xfer(qc);
4658
f686bcb8
TH
4659 __ata_qc_complete(qc);
4660 } else {
4661 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4662 return;
4663
4664 /* read result TF if failed or requested */
4665 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4666 fill_result_tf(qc);
f686bcb8
TH
4667
4668 __ata_qc_complete(qc);
4669 }
4670}
4671
dedaf2b0
TH
4672/**
4673 * ata_qc_complete_multiple - Complete multiple qcs successfully
4674 * @ap: port in question
4675 * @qc_active: new qc_active mask
4676 * @finish_qc: LLDD callback invoked before completing a qc
4677 *
4678 * Complete in-flight commands. This functions is meant to be
4679 * called from low-level driver's interrupt routine to complete
4680 * requests normally. ap->qc_active and @qc_active is compared
4681 * and commands are completed accordingly.
4682 *
4683 * LOCKING:
cca3974e 4684 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4685 *
4686 * RETURNS:
4687 * Number of completed commands on success, -errno otherwise.
4688 */
4689int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4690 void (*finish_qc)(struct ata_queued_cmd *))
4691{
4692 int nr_done = 0;
4693 u32 done_mask;
4694 int i;
4695
4696 done_mask = ap->qc_active ^ qc_active;
4697
4698 if (unlikely(done_mask & qc_active)) {
4699 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4700 "(%08x->%08x)\n", ap->qc_active, qc_active);
4701 return -EINVAL;
4702 }
4703
4704 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4705 struct ata_queued_cmd *qc;
4706
4707 if (!(done_mask & (1 << i)))
4708 continue;
4709
4710 if ((qc = ata_qc_from_tag(ap, i))) {
4711 if (finish_qc)
4712 finish_qc(qc);
4713 ata_qc_complete(qc);
4714 nr_done++;
4715 }
4716 }
4717
4718 return nr_done;
4719}
4720
1da177e4
LT
4721/**
4722 * ata_qc_issue - issue taskfile to device
4723 * @qc: command to issue to device
4724 *
4725 * Prepare an ATA command to submission to device.
4726 * This includes mapping the data into a DMA-able
4727 * area, filling in the S/G table, and finally
4728 * writing the taskfile to hardware, starting the command.
4729 *
4730 * LOCKING:
cca3974e 4731 * spin_lock_irqsave(host lock)
1da177e4 4732 */
8e0e694a 4733void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4734{
4735 struct ata_port *ap = qc->ap;
9af5c9c9 4736 struct ata_link *link = qc->dev->link;
405e66b3 4737 u8 prot = qc->tf.protocol;
1da177e4 4738
dedaf2b0
TH
4739 /* Make sure only one non-NCQ command is outstanding. The
4740 * check is skipped for old EH because it reuses active qc to
4741 * request ATAPI sense.
4742 */
9af5c9c9 4743 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0 4744
1973a023 4745 if (ata_is_ncq(prot)) {
9af5c9c9 4746 WARN_ON(link->sactive & (1 << qc->tag));
da917d69
TH
4747
4748 if (!link->sactive)
4749 ap->nr_active_links++;
9af5c9c9 4750 link->sactive |= 1 << qc->tag;
dedaf2b0 4751 } else {
9af5c9c9 4752 WARN_ON(link->sactive);
da917d69
TH
4753
4754 ap->nr_active_links++;
9af5c9c9 4755 link->active_tag = qc->tag;
dedaf2b0
TH
4756 }
4757
e4a70e76 4758 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4759 ap->qc_active |= 1 << qc->tag;
e4a70e76 4760
f92a2636
TH
4761 /* We guarantee to LLDs that they will have at least one
4762 * non-zero sg if the command is a data command.
4763 */
ff2aeb1e 4764 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
f92a2636 4765
405e66b3 4766 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
f92a2636 4767 (ap->flags & ATA_FLAG_PIO_DMA)))
001102d7
TH
4768 if (ata_sg_setup(qc))
4769 goto sg_err;
1da177e4 4770
cf480626 4771 /* if device is sleeping, schedule reset and abort the link */
054a5fba 4772 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
cf480626 4773 link->eh_info.action |= ATA_EH_RESET;
054a5fba
TH
4774 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4775 ata_link_abort(link);
4776 return;
4777 }
4778
1da177e4
LT
4779 ap->ops->qc_prep(qc);
4780
8e0e694a
TH
4781 qc->err_mask |= ap->ops->qc_issue(qc);
4782 if (unlikely(qc->err_mask))
4783 goto err;
4784 return;
1da177e4 4785
8e436af9 4786sg_err:
8e0e694a
TH
4787 qc->err_mask |= AC_ERR_SYSTEM;
4788err:
4789 ata_qc_complete(qc);
1da177e4
LT
4790}
4791
34bf2170
TH
4792/**
4793 * sata_scr_valid - test whether SCRs are accessible
936fd732 4794 * @link: ATA link to test SCR accessibility for
34bf2170 4795 *
936fd732 4796 * Test whether SCRs are accessible for @link.
34bf2170
TH
4797 *
4798 * LOCKING:
4799 * None.
4800 *
4801 * RETURNS:
4802 * 1 if SCRs are accessible, 0 otherwise.
4803 */
936fd732 4804int sata_scr_valid(struct ata_link *link)
34bf2170 4805{
936fd732
TH
4806 struct ata_port *ap = link->ap;
4807
a16abc0b 4808 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
4809}
4810
4811/**
4812 * sata_scr_read - read SCR register of the specified port
936fd732 4813 * @link: ATA link to read SCR for
34bf2170
TH
4814 * @reg: SCR to read
4815 * @val: Place to store read value
4816 *
936fd732 4817 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
4818 * guaranteed to succeed if @link is ap->link, the cable type of
4819 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
4820 *
4821 * LOCKING:
633273a3 4822 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
4823 *
4824 * RETURNS:
4825 * 0 on success, negative errno on failure.
4826 */
936fd732 4827int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 4828{
633273a3
TH
4829 if (ata_is_host_link(link)) {
4830 struct ata_port *ap = link->ap;
936fd732 4831
633273a3
TH
4832 if (sata_scr_valid(link))
4833 return ap->ops->scr_read(ap, reg, val);
4834 return -EOPNOTSUPP;
4835 }
4836
4837 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
4838}
4839
4840/**
4841 * sata_scr_write - write SCR register of the specified port
936fd732 4842 * @link: ATA link to write SCR for
34bf2170
TH
4843 * @reg: SCR to write
4844 * @val: value to write
4845 *
936fd732 4846 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
4847 * guaranteed to succeed if @link is ap->link, the cable type of
4848 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
4849 *
4850 * LOCKING:
633273a3 4851 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
4852 *
4853 * RETURNS:
4854 * 0 on success, negative errno on failure.
4855 */
936fd732 4856int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 4857{
633273a3
TH
4858 if (ata_is_host_link(link)) {
4859 struct ata_port *ap = link->ap;
4860
4861 if (sata_scr_valid(link))
4862 return ap->ops->scr_write(ap, reg, val);
4863 return -EOPNOTSUPP;
4864 }
936fd732 4865
633273a3 4866 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
4867}
4868
4869/**
4870 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 4871 * @link: ATA link to write SCR for
34bf2170
TH
4872 * @reg: SCR to write
4873 * @val: value to write
4874 *
4875 * This function is identical to sata_scr_write() except that this
4876 * function performs flush after writing to the register.
4877 *
4878 * LOCKING:
633273a3 4879 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
4880 *
4881 * RETURNS:
4882 * 0 on success, negative errno on failure.
4883 */
936fd732 4884int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 4885{
633273a3
TH
4886 if (ata_is_host_link(link)) {
4887 struct ata_port *ap = link->ap;
4888 int rc;
da3dbb17 4889
633273a3
TH
4890 if (sata_scr_valid(link)) {
4891 rc = ap->ops->scr_write(ap, reg, val);
4892 if (rc == 0)
4893 rc = ap->ops->scr_read(ap, reg, &val);
4894 return rc;
4895 }
4896 return -EOPNOTSUPP;
34bf2170 4897 }
633273a3
TH
4898
4899 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
4900}
4901
4902/**
936fd732
TH
4903 * ata_link_online - test whether the given link is online
4904 * @link: ATA link to test
34bf2170 4905 *
936fd732
TH
4906 * Test whether @link is online. Note that this function returns
4907 * 0 if online status of @link cannot be obtained, so
4908 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4909 *
4910 * LOCKING:
4911 * None.
4912 *
4913 * RETURNS:
4914 * 1 if the port online status is available and online.
4915 */
936fd732 4916int ata_link_online(struct ata_link *link)
34bf2170
TH
4917{
4918 u32 sstatus;
4919
936fd732
TH
4920 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4921 (sstatus & 0xf) == 0x3)
34bf2170
TH
4922 return 1;
4923 return 0;
4924}
4925
4926/**
936fd732
TH
4927 * ata_link_offline - test whether the given link is offline
4928 * @link: ATA link to test
34bf2170 4929 *
936fd732
TH
4930 * Test whether @link is offline. Note that this function
4931 * returns 0 if offline status of @link cannot be obtained, so
4932 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4933 *
4934 * LOCKING:
4935 * None.
4936 *
4937 * RETURNS:
4938 * 1 if the port offline status is available and offline.
4939 */
936fd732 4940int ata_link_offline(struct ata_link *link)
34bf2170
TH
4941{
4942 u32 sstatus;
4943
936fd732
TH
4944 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4945 (sstatus & 0xf) != 0x3)
34bf2170
TH
4946 return 1;
4947 return 0;
4948}
0baab86b 4949
6ffa01d8 4950#ifdef CONFIG_PM
cca3974e
JG
4951static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4952 unsigned int action, unsigned int ehi_flags,
4953 int wait)
500530f6
TH
4954{
4955 unsigned long flags;
4956 int i, rc;
4957
cca3974e
JG
4958 for (i = 0; i < host->n_ports; i++) {
4959 struct ata_port *ap = host->ports[i];
e3667ebf 4960 struct ata_link *link;
500530f6
TH
4961
4962 /* Previous resume operation might still be in
4963 * progress. Wait for PM_PENDING to clear.
4964 */
4965 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4966 ata_port_wait_eh(ap);
4967 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4968 }
4969
4970 /* request PM ops to EH */
4971 spin_lock_irqsave(ap->lock, flags);
4972
4973 ap->pm_mesg = mesg;
4974 if (wait) {
4975 rc = 0;
4976 ap->pm_result = &rc;
4977 }
4978
4979 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
4980 __ata_port_for_each_link(link, ap) {
4981 link->eh_info.action |= action;
4982 link->eh_info.flags |= ehi_flags;
4983 }
500530f6
TH
4984
4985 ata_port_schedule_eh(ap);
4986
4987 spin_unlock_irqrestore(ap->lock, flags);
4988
4989 /* wait and check result */
4990 if (wait) {
4991 ata_port_wait_eh(ap);
4992 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4993 if (rc)
4994 return rc;
4995 }
4996 }
4997
4998 return 0;
4999}
5000
5001/**
cca3974e
JG
5002 * ata_host_suspend - suspend host
5003 * @host: host to suspend
500530f6
TH
5004 * @mesg: PM message
5005 *
cca3974e 5006 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5007 * function requests EH to perform PM operations and waits for EH
5008 * to finish.
5009 *
5010 * LOCKING:
5011 * Kernel thread context (may sleep).
5012 *
5013 * RETURNS:
5014 * 0 on success, -errno on failure.
5015 */
cca3974e 5016int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 5017{
9666f400 5018 int rc;
500530f6 5019
ca77329f
KCA
5020 /*
5021 * disable link pm on all ports before requesting
5022 * any pm activity
5023 */
5024 ata_lpm_enable(host);
5025
cca3974e 5026 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
72ad6ec4
JG
5027 if (rc == 0)
5028 host->dev->power.power_state = mesg;
500530f6
TH
5029 return rc;
5030}
5031
5032/**
cca3974e
JG
5033 * ata_host_resume - resume host
5034 * @host: host to resume
500530f6 5035 *
cca3974e 5036 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5037 * function requests EH to perform PM operations and returns.
5038 * Note that all resume operations are performed parallely.
5039 *
5040 * LOCKING:
5041 * Kernel thread context (may sleep).
5042 */
cca3974e 5043void ata_host_resume(struct ata_host *host)
500530f6 5044{
cf480626 5045 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
cca3974e 5046 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
72ad6ec4 5047 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
5048
5049 /* reenable link pm */
5050 ata_lpm_disable(host);
500530f6 5051}
6ffa01d8 5052#endif
500530f6 5053
c893a3ae
RD
5054/**
5055 * ata_port_start - Set port up for dma.
5056 * @ap: Port to initialize
5057 *
5058 * Called just after data structures for each port are
5059 * initialized. Allocates space for PRD table.
5060 *
5061 * May be used as the port_start() entry in ata_port_operations.
5062 *
5063 * LOCKING:
5064 * Inherited from caller.
5065 */
f0d36efd 5066int ata_port_start(struct ata_port *ap)
1da177e4 5067{
2f1f610b 5068 struct device *dev = ap->dev;
1da177e4 5069
f0d36efd
TH
5070 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5071 GFP_KERNEL);
1da177e4
LT
5072 if (!ap->prd)
5073 return -ENOMEM;
5074
1da177e4
LT
5075 return 0;
5076}
5077
3ef3b43d
TH
5078/**
5079 * ata_dev_init - Initialize an ata_device structure
5080 * @dev: Device structure to initialize
5081 *
5082 * Initialize @dev in preparation for probing.
5083 *
5084 * LOCKING:
5085 * Inherited from caller.
5086 */
5087void ata_dev_init(struct ata_device *dev)
5088{
9af5c9c9
TH
5089 struct ata_link *link = dev->link;
5090 struct ata_port *ap = link->ap;
72fa4b74
TH
5091 unsigned long flags;
5092
5a04bf4b 5093 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
5094 link->sata_spd_limit = link->hw_sata_spd_limit;
5095 link->sata_spd = 0;
5a04bf4b 5096
72fa4b74
TH
5097 /* High bits of dev->flags are used to record warm plug
5098 * requests which occur asynchronously. Synchronize using
cca3974e 5099 * host lock.
72fa4b74 5100 */
ba6a1308 5101 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5102 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 5103 dev->horkage = 0;
ba6a1308 5104 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5105
72fa4b74
TH
5106 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5107 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5108 dev->pio_mask = UINT_MAX;
5109 dev->mwdma_mask = UINT_MAX;
5110 dev->udma_mask = UINT_MAX;
5111}
5112
4fb37a25
TH
5113/**
5114 * ata_link_init - Initialize an ata_link structure
5115 * @ap: ATA port link is attached to
5116 * @link: Link structure to initialize
8989805d 5117 * @pmp: Port multiplier port number
4fb37a25
TH
5118 *
5119 * Initialize @link.
5120 *
5121 * LOCKING:
5122 * Kernel thread context (may sleep)
5123 */
fb7fd614 5124void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
5125{
5126 int i;
5127
5128 /* clear everything except for devices */
5129 memset(link, 0, offsetof(struct ata_link, device[0]));
5130
5131 link->ap = ap;
8989805d 5132 link->pmp = pmp;
4fb37a25
TH
5133 link->active_tag = ATA_TAG_POISON;
5134 link->hw_sata_spd_limit = UINT_MAX;
5135
5136 /* can't use iterator, ap isn't initialized yet */
5137 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5138 struct ata_device *dev = &link->device[i];
5139
5140 dev->link = link;
5141 dev->devno = dev - link->device;
5142 ata_dev_init(dev);
5143 }
5144}
5145
5146/**
5147 * sata_link_init_spd - Initialize link->sata_spd_limit
5148 * @link: Link to configure sata_spd_limit for
5149 *
5150 * Initialize @link->[hw_]sata_spd_limit to the currently
5151 * configured value.
5152 *
5153 * LOCKING:
5154 * Kernel thread context (may sleep).
5155 *
5156 * RETURNS:
5157 * 0 on success, -errno on failure.
5158 */
fb7fd614 5159int sata_link_init_spd(struct ata_link *link)
4fb37a25 5160{
33267325
TH
5161 u32 scontrol;
5162 u8 spd;
4fb37a25
TH
5163 int rc;
5164
5165 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
5166 if (rc)
5167 return rc;
5168
5169 spd = (scontrol >> 4) & 0xf;
5170 if (spd)
5171 link->hw_sata_spd_limit &= (1 << spd) - 1;
5172
33267325
TH
5173 ata_force_spd_limit(link);
5174
4fb37a25
TH
5175 link->sata_spd_limit = link->hw_sata_spd_limit;
5176
5177 return 0;
5178}
5179
1da177e4 5180/**
f3187195
TH
5181 * ata_port_alloc - allocate and initialize basic ATA port resources
5182 * @host: ATA host this allocated port belongs to
1da177e4 5183 *
f3187195
TH
5184 * Allocate and initialize basic ATA port resources.
5185 *
5186 * RETURNS:
5187 * Allocate ATA port on success, NULL on failure.
0cba632b 5188 *
1da177e4 5189 * LOCKING:
f3187195 5190 * Inherited from calling layer (may sleep).
1da177e4 5191 */
f3187195 5192struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 5193{
f3187195 5194 struct ata_port *ap;
1da177e4 5195
f3187195
TH
5196 DPRINTK("ENTER\n");
5197
5198 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5199 if (!ap)
5200 return NULL;
5201
f4d6d004 5202 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 5203 ap->lock = &host->lock;
198e0fed 5204 ap->flags = ATA_FLAG_DISABLED;
f3187195 5205 ap->print_id = -1;
1da177e4 5206 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5207 ap->host = host;
f3187195 5208 ap->dev = host->dev;
1da177e4 5209 ap->last_ctl = 0xFF;
bd5d825c
BP
5210
5211#if defined(ATA_VERBOSE_DEBUG)
5212 /* turn on all debugging levels */
5213 ap->msg_enable = 0x00FF;
5214#elif defined(ATA_DEBUG)
5215 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5216#else
0dd4b21f 5217 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5218#endif
1da177e4 5219
442eacc3 5220 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
65f27f38
DH
5221 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5222 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5223 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5224 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
5225 init_timer_deferrable(&ap->fastdrain_timer);
5226 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5227 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 5228
838df628 5229 ap->cbl = ATA_CBL_NONE;
838df628 5230
8989805d 5231 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
5232
5233#ifdef ATA_IRQ_TRAP
5234 ap->stats.unhandled_irq = 1;
5235 ap->stats.idle_irq = 1;
5236#endif
1da177e4 5237 return ap;
1da177e4
LT
5238}
5239
f0d36efd
TH
5240static void ata_host_release(struct device *gendev, void *res)
5241{
5242 struct ata_host *host = dev_get_drvdata(gendev);
5243 int i;
5244
1aa506e4
TH
5245 for (i = 0; i < host->n_ports; i++) {
5246 struct ata_port *ap = host->ports[i];
5247
4911487a
TH
5248 if (!ap)
5249 continue;
5250
5251 if (ap->scsi_host)
1aa506e4
TH
5252 scsi_host_put(ap->scsi_host);
5253
633273a3 5254 kfree(ap->pmp_link);
4911487a 5255 kfree(ap);
1aa506e4
TH
5256 host->ports[i] = NULL;
5257 }
5258
1aa56cca 5259 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
5260}
5261
f3187195
TH
5262/**
5263 * ata_host_alloc - allocate and init basic ATA host resources
5264 * @dev: generic device this host is associated with
5265 * @max_ports: maximum number of ATA ports associated with this host
5266 *
5267 * Allocate and initialize basic ATA host resources. LLD calls
5268 * this function to allocate a host, initializes it fully and
5269 * attaches it using ata_host_register().
5270 *
5271 * @max_ports ports are allocated and host->n_ports is
5272 * initialized to @max_ports. The caller is allowed to decrease
5273 * host->n_ports before calling ata_host_register(). The unused
5274 * ports will be automatically freed on registration.
5275 *
5276 * RETURNS:
5277 * Allocate ATA host on success, NULL on failure.
5278 *
5279 * LOCKING:
5280 * Inherited from calling layer (may sleep).
5281 */
5282struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5283{
5284 struct ata_host *host;
5285 size_t sz;
5286 int i;
5287
5288 DPRINTK("ENTER\n");
5289
5290 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5291 return NULL;
5292
5293 /* alloc a container for our list of ATA ports (buses) */
5294 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5295 /* alloc a container for our list of ATA ports (buses) */
5296 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5297 if (!host)
5298 goto err_out;
5299
5300 devres_add(dev, host);
5301 dev_set_drvdata(dev, host);
5302
5303 spin_lock_init(&host->lock);
5304 host->dev = dev;
5305 host->n_ports = max_ports;
5306
5307 /* allocate ports bound to this host */
5308 for (i = 0; i < max_ports; i++) {
5309 struct ata_port *ap;
5310
5311 ap = ata_port_alloc(host);
5312 if (!ap)
5313 goto err_out;
5314
5315 ap->port_no = i;
5316 host->ports[i] = ap;
5317 }
5318
5319 devres_remove_group(dev, NULL);
5320 return host;
5321
5322 err_out:
5323 devres_release_group(dev, NULL);
5324 return NULL;
5325}
5326
f5cda257
TH
5327/**
5328 * ata_host_alloc_pinfo - alloc host and init with port_info array
5329 * @dev: generic device this host is associated with
5330 * @ppi: array of ATA port_info to initialize host with
5331 * @n_ports: number of ATA ports attached to this host
5332 *
5333 * Allocate ATA host and initialize with info from @ppi. If NULL
5334 * terminated, @ppi may contain fewer entries than @n_ports. The
5335 * last entry will be used for the remaining ports.
5336 *
5337 * RETURNS:
5338 * Allocate ATA host on success, NULL on failure.
5339 *
5340 * LOCKING:
5341 * Inherited from calling layer (may sleep).
5342 */
5343struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5344 const struct ata_port_info * const * ppi,
5345 int n_ports)
5346{
5347 const struct ata_port_info *pi;
5348 struct ata_host *host;
5349 int i, j;
5350
5351 host = ata_host_alloc(dev, n_ports);
5352 if (!host)
5353 return NULL;
5354
5355 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5356 struct ata_port *ap = host->ports[i];
5357
5358 if (ppi[j])
5359 pi = ppi[j++];
5360
5361 ap->pio_mask = pi->pio_mask;
5362 ap->mwdma_mask = pi->mwdma_mask;
5363 ap->udma_mask = pi->udma_mask;
5364 ap->flags |= pi->flags;
0c88758b 5365 ap->link.flags |= pi->link_flags;
f5cda257
TH
5366 ap->ops = pi->port_ops;
5367
5368 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5369 host->ops = pi->port_ops;
f5cda257
TH
5370 }
5371
5372 return host;
5373}
5374
32ebbc0c
TH
5375static void ata_host_stop(struct device *gendev, void *res)
5376{
5377 struct ata_host *host = dev_get_drvdata(gendev);
5378 int i;
5379
5380 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5381
5382 for (i = 0; i < host->n_ports; i++) {
5383 struct ata_port *ap = host->ports[i];
5384
5385 if (ap->ops->port_stop)
5386 ap->ops->port_stop(ap);
5387 }
5388
5389 if (host->ops->host_stop)
5390 host->ops->host_stop(host);
5391}
5392
029cfd6b
TH
5393/**
5394 * ata_finalize_port_ops - finalize ata_port_operations
5395 * @ops: ata_port_operations to finalize
5396 *
5397 * An ata_port_operations can inherit from another ops and that
5398 * ops can again inherit from another. This can go on as many
5399 * times as necessary as long as there is no loop in the
5400 * inheritance chain.
5401 *
5402 * Ops tables are finalized when the host is started. NULL or
5403 * unspecified entries are inherited from the closet ancestor
5404 * which has the method and the entry is populated with it.
5405 * After finalization, the ops table directly points to all the
5406 * methods and ->inherits is no longer necessary and cleared.
5407 *
5408 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5409 *
5410 * LOCKING:
5411 * None.
5412 */
5413static void ata_finalize_port_ops(struct ata_port_operations *ops)
5414{
5415 static spinlock_t lock = SPIN_LOCK_UNLOCKED;
5416 const struct ata_port_operations *cur;
5417 void **begin = (void **)ops;
5418 void **end = (void **)&ops->inherits;
5419 void **pp;
5420
5421 if (!ops || !ops->inherits)
5422 return;
5423
5424 spin_lock(&lock);
5425
5426 for (cur = ops->inherits; cur; cur = cur->inherits) {
5427 void **inherit = (void **)cur;
5428
5429 for (pp = begin; pp < end; pp++, inherit++)
5430 if (!*pp)
5431 *pp = *inherit;
5432 }
5433
5434 for (pp = begin; pp < end; pp++)
5435 if (IS_ERR(*pp))
5436 *pp = NULL;
5437
5438 ops->inherits = NULL;
5439
5440 spin_unlock(&lock);
5441}
5442
ecef7253
TH
5443/**
5444 * ata_host_start - start and freeze ports of an ATA host
5445 * @host: ATA host to start ports for
5446 *
5447 * Start and then freeze ports of @host. Started status is
5448 * recorded in host->flags, so this function can be called
5449 * multiple times. Ports are guaranteed to get started only
f3187195
TH
5450 * once. If host->ops isn't initialized yet, its set to the
5451 * first non-dummy port ops.
ecef7253
TH
5452 *
5453 * LOCKING:
5454 * Inherited from calling layer (may sleep).
5455 *
5456 * RETURNS:
5457 * 0 if all ports are started successfully, -errno otherwise.
5458 */
5459int ata_host_start(struct ata_host *host)
5460{
32ebbc0c
TH
5461 int have_stop = 0;
5462 void *start_dr = NULL;
ecef7253
TH
5463 int i, rc;
5464
5465 if (host->flags & ATA_HOST_STARTED)
5466 return 0;
5467
029cfd6b
TH
5468 ata_finalize_port_ops(host->ops);
5469
ecef7253
TH
5470 for (i = 0; i < host->n_ports; i++) {
5471 struct ata_port *ap = host->ports[i];
5472
029cfd6b
TH
5473 ata_finalize_port_ops(ap->ops);
5474
f3187195
TH
5475 if (!host->ops && !ata_port_is_dummy(ap))
5476 host->ops = ap->ops;
5477
32ebbc0c
TH
5478 if (ap->ops->port_stop)
5479 have_stop = 1;
5480 }
5481
5482 if (host->ops->host_stop)
5483 have_stop = 1;
5484
5485 if (have_stop) {
5486 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5487 if (!start_dr)
5488 return -ENOMEM;
5489 }
5490
5491 for (i = 0; i < host->n_ports; i++) {
5492 struct ata_port *ap = host->ports[i];
5493
ecef7253
TH
5494 if (ap->ops->port_start) {
5495 rc = ap->ops->port_start(ap);
5496 if (rc) {
0f9fe9b7 5497 if (rc != -ENODEV)
0f757743
AM
5498 dev_printk(KERN_ERR, host->dev,
5499 "failed to start port %d "
5500 "(errno=%d)\n", i, rc);
ecef7253
TH
5501 goto err_out;
5502 }
5503 }
ecef7253
TH
5504 ata_eh_freeze_port(ap);
5505 }
5506
32ebbc0c
TH
5507 if (start_dr)
5508 devres_add(host->dev, start_dr);
ecef7253
TH
5509 host->flags |= ATA_HOST_STARTED;
5510 return 0;
5511
5512 err_out:
5513 while (--i >= 0) {
5514 struct ata_port *ap = host->ports[i];
5515
5516 if (ap->ops->port_stop)
5517 ap->ops->port_stop(ap);
5518 }
32ebbc0c 5519 devres_free(start_dr);
ecef7253
TH
5520 return rc;
5521}
5522
b03732f0 5523/**
cca3974e
JG
5524 * ata_sas_host_init - Initialize a host struct
5525 * @host: host to initialize
5526 * @dev: device host is attached to
5527 * @flags: host flags
5528 * @ops: port_ops
b03732f0
BK
5529 *
5530 * LOCKING:
5531 * PCI/etc. bus probe sem.
5532 *
5533 */
f3187195 5534/* KILLME - the only user left is ipr */
cca3974e 5535void ata_host_init(struct ata_host *host, struct device *dev,
029cfd6b 5536 unsigned long flags, struct ata_port_operations *ops)
b03732f0 5537{
cca3974e
JG
5538 spin_lock_init(&host->lock);
5539 host->dev = dev;
5540 host->flags = flags;
5541 host->ops = ops;
b03732f0
BK
5542}
5543
f3187195
TH
5544/**
5545 * ata_host_register - register initialized ATA host
5546 * @host: ATA host to register
5547 * @sht: template for SCSI host
5548 *
5549 * Register initialized ATA host. @host is allocated using
5550 * ata_host_alloc() and fully initialized by LLD. This function
5551 * starts ports, registers @host with ATA and SCSI layers and
5552 * probe registered devices.
5553 *
5554 * LOCKING:
5555 * Inherited from calling layer (may sleep).
5556 *
5557 * RETURNS:
5558 * 0 on success, -errno otherwise.
5559 */
5560int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5561{
5562 int i, rc;
5563
5564 /* host must have been started */
5565 if (!(host->flags & ATA_HOST_STARTED)) {
5566 dev_printk(KERN_ERR, host->dev,
5567 "BUG: trying to register unstarted host\n");
5568 WARN_ON(1);
5569 return -EINVAL;
5570 }
5571
5572 /* Blow away unused ports. This happens when LLD can't
5573 * determine the exact number of ports to allocate at
5574 * allocation time.
5575 */
5576 for (i = host->n_ports; host->ports[i]; i++)
5577 kfree(host->ports[i]);
5578
5579 /* give ports names and add SCSI hosts */
5580 for (i = 0; i < host->n_ports; i++)
5581 host->ports[i]->print_id = ata_print_id++;
5582
5583 rc = ata_scsi_add_hosts(host, sht);
5584 if (rc)
5585 return rc;
5586
fafbae87
TH
5587 /* associate with ACPI nodes */
5588 ata_acpi_associate(host);
5589
f3187195
TH
5590 /* set cable, sata_spd_limit and report */
5591 for (i = 0; i < host->n_ports; i++) {
5592 struct ata_port *ap = host->ports[i];
f3187195
TH
5593 unsigned long xfer_mask;
5594
5595 /* set SATA cable type if still unset */
5596 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5597 ap->cbl = ATA_CBL_SATA;
5598
5599 /* init sata_spd_limit to the current value */
4fb37a25 5600 sata_link_init_spd(&ap->link);
f3187195 5601
cbcdd875 5602 /* print per-port info to dmesg */
f3187195
TH
5603 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5604 ap->udma_mask);
5605
abf6e8ed 5606 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
5607 ata_port_printk(ap, KERN_INFO,
5608 "%cATA max %s %s\n",
a16abc0b 5609 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 5610 ata_mode_string(xfer_mask),
cbcdd875 5611 ap->link.eh_info.desc);
abf6e8ed
TH
5612 ata_ehi_clear_desc(&ap->link.eh_info);
5613 } else
f3187195
TH
5614 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5615 }
5616
5617 /* perform each probe synchronously */
5618 DPRINTK("probe begin\n");
5619 for (i = 0; i < host->n_ports; i++) {
5620 struct ata_port *ap = host->ports[i];
f3187195
TH
5621
5622 /* probe */
5623 if (ap->ops->error_handler) {
9af5c9c9 5624 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
5625 unsigned long flags;
5626
5627 ata_port_probe(ap);
5628
5629 /* kick EH for boot probing */
5630 spin_lock_irqsave(ap->lock, flags);
5631
b558eddd 5632 ehi->probe_mask |= ATA_ALL_DEVICES;
cf480626 5633 ehi->action |= ATA_EH_RESET;
f3187195
TH
5634 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5635
f4d6d004 5636 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
5637 ap->pflags |= ATA_PFLAG_LOADING;
5638 ata_port_schedule_eh(ap);
5639
5640 spin_unlock_irqrestore(ap->lock, flags);
5641
5642 /* wait for EH to finish */
5643 ata_port_wait_eh(ap);
5644 } else {
5645 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5646 rc = ata_bus_probe(ap);
5647 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5648
5649 if (rc) {
5650 /* FIXME: do something useful here?
5651 * Current libata behavior will
5652 * tear down everything when
5653 * the module is removed
5654 * or the h/w is unplugged.
5655 */
5656 }
5657 }
5658 }
5659
5660 /* probes are done, now scan each port's disk(s) */
5661 DPRINTK("host probe begin\n");
5662 for (i = 0; i < host->n_ports; i++) {
5663 struct ata_port *ap = host->ports[i];
5664
1ae46317 5665 ata_scsi_scan_host(ap, 1);
ca77329f 5666 ata_lpm_schedule(ap, ap->pm_policy);
f3187195
TH
5667 }
5668
5669 return 0;
5670}
5671
f5cda257
TH
5672/**
5673 * ata_host_activate - start host, request IRQ and register it
5674 * @host: target ATA host
5675 * @irq: IRQ to request
5676 * @irq_handler: irq_handler used when requesting IRQ
5677 * @irq_flags: irq_flags used when requesting IRQ
5678 * @sht: scsi_host_template to use when registering the host
5679 *
5680 * After allocating an ATA host and initializing it, most libata
5681 * LLDs perform three steps to activate the host - start host,
5682 * request IRQ and register it. This helper takes necessasry
5683 * arguments and performs the three steps in one go.
5684 *
3d46b2e2
PM
5685 * An invalid IRQ skips the IRQ registration and expects the host to
5686 * have set polling mode on the port. In this case, @irq_handler
5687 * should be NULL.
5688 *
f5cda257
TH
5689 * LOCKING:
5690 * Inherited from calling layer (may sleep).
5691 *
5692 * RETURNS:
5693 * 0 on success, -errno otherwise.
5694 */
5695int ata_host_activate(struct ata_host *host, int irq,
5696 irq_handler_t irq_handler, unsigned long irq_flags,
5697 struct scsi_host_template *sht)
5698{
cbcdd875 5699 int i, rc;
f5cda257
TH
5700
5701 rc = ata_host_start(host);
5702 if (rc)
5703 return rc;
5704
3d46b2e2
PM
5705 /* Special case for polling mode */
5706 if (!irq) {
5707 WARN_ON(irq_handler);
5708 return ata_host_register(host, sht);
5709 }
5710
f5cda257
TH
5711 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5712 dev_driver_string(host->dev), host);
5713 if (rc)
5714 return rc;
5715
cbcdd875
TH
5716 for (i = 0; i < host->n_ports; i++)
5717 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 5718
f5cda257
TH
5719 rc = ata_host_register(host, sht);
5720 /* if failed, just free the IRQ and leave ports alone */
5721 if (rc)
5722 devm_free_irq(host->dev, irq, host);
5723
5724 return rc;
5725}
5726
720ba126
TH
5727/**
5728 * ata_port_detach - Detach ATA port in prepration of device removal
5729 * @ap: ATA port to be detached
5730 *
5731 * Detach all ATA devices and the associated SCSI devices of @ap;
5732 * then, remove the associated SCSI host. @ap is guaranteed to
5733 * be quiescent on return from this function.
5734 *
5735 * LOCKING:
5736 * Kernel thread context (may sleep).
5737 */
741b7763 5738static void ata_port_detach(struct ata_port *ap)
720ba126
TH
5739{
5740 unsigned long flags;
41bda9c9 5741 struct ata_link *link;
f58229f8 5742 struct ata_device *dev;
720ba126
TH
5743
5744 if (!ap->ops->error_handler)
c3cf30a9 5745 goto skip_eh;
720ba126
TH
5746
5747 /* tell EH we're leaving & flush EH */
ba6a1308 5748 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 5749 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 5750 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5751
5752 ata_port_wait_eh(ap);
5753
7f9ad9b8
TH
5754 /* EH is now guaranteed to see UNLOADING - EH context belongs
5755 * to us. Disable all existing devices.
720ba126 5756 */
41bda9c9
TH
5757 ata_port_for_each_link(link, ap) {
5758 ata_link_for_each_dev(dev, link)
5759 ata_dev_disable(dev);
5760 }
720ba126 5761
720ba126
TH
5762 /* Final freeze & EH. All in-flight commands are aborted. EH
5763 * will be skipped and retrials will be terminated with bad
5764 * target.
5765 */
ba6a1308 5766 spin_lock_irqsave(ap->lock, flags);
720ba126 5767 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 5768 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5769
5770 ata_port_wait_eh(ap);
45a66c1c 5771 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 5772
c3cf30a9 5773 skip_eh:
720ba126 5774 /* remove the associated SCSI host */
cca3974e 5775 scsi_remove_host(ap->scsi_host);
720ba126
TH
5776}
5777
0529c159
TH
5778/**
5779 * ata_host_detach - Detach all ports of an ATA host
5780 * @host: Host to detach
5781 *
5782 * Detach all ports of @host.
5783 *
5784 * LOCKING:
5785 * Kernel thread context (may sleep).
5786 */
5787void ata_host_detach(struct ata_host *host)
5788{
5789 int i;
5790
5791 for (i = 0; i < host->n_ports; i++)
5792 ata_port_detach(host->ports[i]);
562f0c2d
TH
5793
5794 /* the host is dead now, dissociate ACPI */
5795 ata_acpi_dissociate(host);
0529c159
TH
5796}
5797
374b1873
JG
5798#ifdef CONFIG_PCI
5799
1da177e4
LT
5800/**
5801 * ata_pci_remove_one - PCI layer callback for device removal
5802 * @pdev: PCI device that was removed
5803 *
b878ca5d
TH
5804 * PCI layer indicates to libata via this hook that hot-unplug or
5805 * module unload event has occurred. Detach all ports. Resource
5806 * release is handled via devres.
1da177e4
LT
5807 *
5808 * LOCKING:
5809 * Inherited from PCI layer (may sleep).
5810 */
f0d36efd 5811void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 5812{
2855568b 5813 struct device *dev = &pdev->dev;
cca3974e 5814 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 5815
b878ca5d 5816 ata_host_detach(host);
1da177e4
LT
5817}
5818
5819/* move to PCI subsystem */
057ace5e 5820int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5821{
5822 unsigned long tmp = 0;
5823
5824 switch (bits->width) {
5825 case 1: {
5826 u8 tmp8 = 0;
5827 pci_read_config_byte(pdev, bits->reg, &tmp8);
5828 tmp = tmp8;
5829 break;
5830 }
5831 case 2: {
5832 u16 tmp16 = 0;
5833 pci_read_config_word(pdev, bits->reg, &tmp16);
5834 tmp = tmp16;
5835 break;
5836 }
5837 case 4: {
5838 u32 tmp32 = 0;
5839 pci_read_config_dword(pdev, bits->reg, &tmp32);
5840 tmp = tmp32;
5841 break;
5842 }
5843
5844 default:
5845 return -EINVAL;
5846 }
5847
5848 tmp &= bits->mask;
5849
5850 return (tmp == bits->val) ? 1 : 0;
5851}
9b847548 5852
6ffa01d8 5853#ifdef CONFIG_PM
3c5100c1 5854void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
5855{
5856 pci_save_state(pdev);
4c90d971 5857 pci_disable_device(pdev);
500530f6 5858
3a2d5b70 5859 if (mesg.event & PM_EVENT_SLEEP)
500530f6 5860 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
5861}
5862
553c4aa6 5863int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 5864{
553c4aa6
TH
5865 int rc;
5866
9b847548
JA
5867 pci_set_power_state(pdev, PCI_D0);
5868 pci_restore_state(pdev);
553c4aa6 5869
b878ca5d 5870 rc = pcim_enable_device(pdev);
553c4aa6
TH
5871 if (rc) {
5872 dev_printk(KERN_ERR, &pdev->dev,
5873 "failed to enable device after resume (%d)\n", rc);
5874 return rc;
5875 }
5876
9b847548 5877 pci_set_master(pdev);
553c4aa6 5878 return 0;
500530f6
TH
5879}
5880
3c5100c1 5881int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 5882{
cca3974e 5883 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
5884 int rc = 0;
5885
cca3974e 5886 rc = ata_host_suspend(host, mesg);
500530f6
TH
5887 if (rc)
5888 return rc;
5889
3c5100c1 5890 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
5891
5892 return 0;
5893}
5894
5895int ata_pci_device_resume(struct pci_dev *pdev)
5896{
cca3974e 5897 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 5898 int rc;
500530f6 5899
553c4aa6
TH
5900 rc = ata_pci_device_do_resume(pdev);
5901 if (rc == 0)
5902 ata_host_resume(host);
5903 return rc;
9b847548 5904}
6ffa01d8
TH
5905#endif /* CONFIG_PM */
5906
1da177e4
LT
5907#endif /* CONFIG_PCI */
5908
33267325
TH
5909static int __init ata_parse_force_one(char **cur,
5910 struct ata_force_ent *force_ent,
5911 const char **reason)
5912{
5913 /* FIXME: Currently, there's no way to tag init const data and
5914 * using __initdata causes build failure on some versions of
5915 * gcc. Once __initdataconst is implemented, add const to the
5916 * following structure.
5917 */
5918 static struct ata_force_param force_tbl[] __initdata = {
5919 { "40c", .cbl = ATA_CBL_PATA40 },
5920 { "80c", .cbl = ATA_CBL_PATA80 },
5921 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
5922 { "unk", .cbl = ATA_CBL_PATA_UNK },
5923 { "ign", .cbl = ATA_CBL_PATA_IGN },
5924 { "sata", .cbl = ATA_CBL_SATA },
5925 { "1.5Gbps", .spd_limit = 1 },
5926 { "3.0Gbps", .spd_limit = 2 },
5927 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
5928 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
5929 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
5930 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
5931 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
5932 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
5933 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
5934 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
5935 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
5936 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
5937 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
5938 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
5939 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
5940 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
5941 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5942 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5943 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5944 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5945 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5946 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5947 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5948 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5949 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5950 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5951 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5952 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5953 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5954 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5955 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5956 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5957 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5958 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5959 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5960 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5961 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5962 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
5963 };
5964 char *start = *cur, *p = *cur;
5965 char *id, *val, *endp;
5966 const struct ata_force_param *match_fp = NULL;
5967 int nr_matches = 0, i;
5968
5969 /* find where this param ends and update *cur */
5970 while (*p != '\0' && *p != ',')
5971 p++;
5972
5973 if (*p == '\0')
5974 *cur = p;
5975 else
5976 *cur = p + 1;
5977
5978 *p = '\0';
5979
5980 /* parse */
5981 p = strchr(start, ':');
5982 if (!p) {
5983 val = strstrip(start);
5984 goto parse_val;
5985 }
5986 *p = '\0';
5987
5988 id = strstrip(start);
5989 val = strstrip(p + 1);
5990
5991 /* parse id */
5992 p = strchr(id, '.');
5993 if (p) {
5994 *p++ = '\0';
5995 force_ent->device = simple_strtoul(p, &endp, 10);
5996 if (p == endp || *endp != '\0') {
5997 *reason = "invalid device";
5998 return -EINVAL;
5999 }
6000 }
6001
6002 force_ent->port = simple_strtoul(id, &endp, 10);
6003 if (p == endp || *endp != '\0') {
6004 *reason = "invalid port/link";
6005 return -EINVAL;
6006 }
6007
6008 parse_val:
6009 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6010 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6011 const struct ata_force_param *fp = &force_tbl[i];
6012
6013 if (strncasecmp(val, fp->name, strlen(val)))
6014 continue;
6015
6016 nr_matches++;
6017 match_fp = fp;
6018
6019 if (strcasecmp(val, fp->name) == 0) {
6020 nr_matches = 1;
6021 break;
6022 }
6023 }
6024
6025 if (!nr_matches) {
6026 *reason = "unknown value";
6027 return -EINVAL;
6028 }
6029 if (nr_matches > 1) {
6030 *reason = "ambigious value";
6031 return -EINVAL;
6032 }
6033
6034 force_ent->param = *match_fp;
6035
6036 return 0;
6037}
6038
6039static void __init ata_parse_force_param(void)
6040{
6041 int idx = 0, size = 1;
6042 int last_port = -1, last_device = -1;
6043 char *p, *cur, *next;
6044
6045 /* calculate maximum number of params and allocate force_tbl */
6046 for (p = ata_force_param_buf; *p; p++)
6047 if (*p == ',')
6048 size++;
6049
6050 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6051 if (!ata_force_tbl) {
6052 printk(KERN_WARNING "ata: failed to extend force table, "
6053 "libata.force ignored\n");
6054 return;
6055 }
6056
6057 /* parse and populate the table */
6058 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6059 const char *reason = "";
6060 struct ata_force_ent te = { .port = -1, .device = -1 };
6061
6062 next = cur;
6063 if (ata_parse_force_one(&next, &te, &reason)) {
6064 printk(KERN_WARNING "ata: failed to parse force "
6065 "parameter \"%s\" (%s)\n",
6066 cur, reason);
6067 continue;
6068 }
6069
6070 if (te.port == -1) {
6071 te.port = last_port;
6072 te.device = last_device;
6073 }
6074
6075 ata_force_tbl[idx++] = te;
6076
6077 last_port = te.port;
6078 last_device = te.device;
6079 }
6080
6081 ata_force_tbl_size = idx;
6082}
1da177e4 6083
1da177e4
LT
6084static int __init ata_init(void)
6085{
a8601e5f 6086 ata_probe_timeout *= HZ;
33267325
TH
6087
6088 ata_parse_force_param();
6089
1da177e4
LT
6090 ata_wq = create_workqueue("ata");
6091 if (!ata_wq)
6092 return -ENOMEM;
6093
453b07ac
TH
6094 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6095 if (!ata_aux_wq) {
6096 destroy_workqueue(ata_wq);
6097 return -ENOMEM;
6098 }
6099
1da177e4
LT
6100 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6101 return 0;
6102}
6103
6104static void __exit ata_exit(void)
6105{
33267325 6106 kfree(ata_force_tbl);
1da177e4 6107 destroy_workqueue(ata_wq);
453b07ac 6108 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6109}
6110
/* Register at subsys initcall level so libata is initialized before
 * the ATA host drivers that link against it probe their hardware.
 */
subsys_initcall(ata_init);
module_exit(ata_exit);
6113
67846b30 6114static unsigned long ratelimit_time;
34af946a 6115static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6116
6117int ata_ratelimit(void)
6118{
6119 int rc;
6120 unsigned long flags;
6121
6122 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6123
6124 if (time_after(jiffies, ratelimit_time)) {
6125 rc = 1;
6126 ratelimit_time = jiffies + (HZ/5);
6127 } else
6128 rc = 0;
6129
6130 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6131
6132 return rc;
6133}
6134
c22daff4
TH
6135/**
6136 * ata_wait_register - wait until register value changes
6137 * @reg: IO-mapped register
6138 * @mask: Mask to apply to read register value
6139 * @val: Wait condition
6140 * @interval_msec: polling interval in milliseconds
6141 * @timeout_msec: timeout in milliseconds
6142 *
6143 * Waiting for some bits of register to change is a common
6144 * operation for ATA controllers. This function reads 32bit LE
6145 * IO-mapped register @reg and tests for the following condition.
6146 *
6147 * (*@reg & mask) != val
6148 *
6149 * If the condition is met, it returns; otherwise, the process is
6150 * repeated after @interval_msec until timeout.
6151 *
6152 * LOCKING:
6153 * Kernel thread context (may sleep)
6154 *
6155 * RETURNS:
6156 * The final register value.
6157 */
6158u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6159 unsigned long interval_msec,
6160 unsigned long timeout_msec)
6161{
6162 unsigned long timeout;
6163 u32 tmp;
6164
6165 tmp = ioread32(reg);
6166
6167 /* Calculate timeout _after_ the first read to make sure
6168 * preceding writes reach the controller before starting to
6169 * eat away the timeout.
6170 */
6171 timeout = jiffies + (timeout_msec * HZ) / 1000;
6172
6173 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6174 msleep(interval_msec);
6175 tmp = ioread32(reg);
6176 }
6177
6178 return tmp;
6179}
6180
dd5b06c4
TH
6181/*
6182 * Dummy port_ops
6183 */
6184static void ata_dummy_noret(struct ata_port *ap) { }
6185static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6186static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6187
6188static u8 ata_dummy_check_status(struct ata_port *ap)
6189{
6190 return ATA_DRDY;
6191}
6192
6193static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6194{
6195 return AC_ERR_SYSTEM;
6196}
6197
/* Port operations for a dummy (unused) port: every command fails with
 * AC_ERR_SYSTEM and every EH/lifecycle hook is a harmless no-op.
 */
struct ata_port_operations ata_dummy_port_ops = {
	.sff_check_status = ata_dummy_check_status,
	.sff_check_altstatus = ata_dummy_check_status,
	.sff_dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.sff_irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};
6212
21b0ad4f
TH
/* Port info wrapping ata_dummy_port_ops, for drivers that mark a port
 * as dummy via a port_info table entry.
 */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
6216
1da177e4
LT
6217/*
6218 * libata is essentially a library of internal helper functions for
6219 * low-level ATA host controller drivers. As such, the API/ABI is
6220 * likely to change as new drivers are added and updated.
6221 * Do not depend on ABI/API stability.
6222 */
e9c83914
TH
6223EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6224EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6225EXPORT_SYMBOL_GPL(sata_deb_timing_long);
029cfd6b
TH
6226EXPORT_SYMBOL_GPL(ata_base_port_ops);
6227EXPORT_SYMBOL_GPL(sata_port_ops);
6228EXPORT_SYMBOL_GPL(sata_pmp_port_ops);
dd5b06c4 6229EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 6230EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4 6231EXPORT_SYMBOL_GPL(ata_std_bios_param);
cca3974e 6232EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6233EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 6234EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 6235EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6236EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 6237EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 6238EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 6239EXPORT_SYMBOL_GPL(ata_sg_init);
f686bcb8 6240EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6241EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6242EXPORT_SYMBOL_GPL(ata_noop_dev_select);
43727fbc 6243EXPORT_SYMBOL_GPL(sata_print_link_status);
436d34b3 6244EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
6245EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6246EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
6247EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6248EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6249EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6250EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6251EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6252EXPORT_SYMBOL_GPL(ata_mode_string);
6253EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4 6254EXPORT_SYMBOL_GPL(ata_port_start);
04351821 6255EXPORT_SYMBOL_GPL(ata_do_set_mode);
31cc23b3 6256EXPORT_SYMBOL_GPL(ata_std_qc_defer);
e46834cd 6257EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4 6258EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6259EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6260EXPORT_SYMBOL_GPL(sata_set_spd);
aa2731ad 6261EXPORT_SYMBOL_GPL(ata_wait_after_reset);
936fd732
TH
6262EXPORT_SYMBOL_GPL(sata_link_debounce);
6263EXPORT_SYMBOL_GPL(sata_link_resume);
0aa1113d 6264EXPORT_SYMBOL_GPL(ata_std_prereset);
cc0680a5 6265EXPORT_SYMBOL_GPL(sata_link_hardreset);
57c9efdf 6266EXPORT_SYMBOL_GPL(sata_std_hardreset);
203c75b8 6267EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6268EXPORT_SYMBOL_GPL(ata_dev_classify);
6269EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6270EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6271EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6272EXPORT_SYMBOL_GPL(ata_wait_register);
1da177e4
LT
6273EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6274EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6275EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6276EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6277EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
34bf2170
TH
6278EXPORT_SYMBOL_GPL(sata_scr_valid);
6279EXPORT_SYMBOL_GPL(sata_scr_read);
6280EXPORT_SYMBOL_GPL(sata_scr_write);
6281EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
6282EXPORT_SYMBOL_GPL(ata_link_online);
6283EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 6284#ifdef CONFIG_PM
cca3974e
JG
6285EXPORT_SYMBOL_GPL(ata_host_suspend);
6286EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6287#endif /* CONFIG_PM */
6a62a04d
TH
6288EXPORT_SYMBOL_GPL(ata_id_string);
6289EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4
LT
6290EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6291
1bc4ccff 6292EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 6293EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
6294EXPORT_SYMBOL_GPL(ata_timing_compute);
6295EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 6296EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 6297
1da177e4
LT
6298#ifdef CONFIG_PCI
6299EXPORT_SYMBOL_GPL(pci_test_config_bits);
1da177e4 6300EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6301#ifdef CONFIG_PM
500530f6
TH
6302EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6303EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6304EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6305EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6306#endif /* CONFIG_PM */
1da177e4 6307#endif /* CONFIG_PCI */
9b847548 6308
31f88384 6309EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
a1efdaba 6310EXPORT_SYMBOL_GPL(sata_pmp_error_handler);
3af9a77a 6311
b64bbc39
TH
6312EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6313EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6314EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
6315EXPORT_SYMBOL_GPL(ata_port_desc);
6316#ifdef CONFIG_PCI
6317EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6318#endif /* CONFIG_PCI */
7b70fc03 6319EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 6320EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 6321EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 6322EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 6323EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
6324EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6325EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6326EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6327EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6328EXPORT_SYMBOL_GPL(ata_do_eh);
a1efdaba 6329EXPORT_SYMBOL_GPL(ata_std_error_handler);
be0d18df
AC
6330
6331EXPORT_SYMBOL_GPL(ata_cable_40wire);
6332EXPORT_SYMBOL_GPL(ata_cable_80wire);
6333EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 6334EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 6335EXPORT_SYMBOL_GPL(ata_cable_sata);