/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION     "2.20"  /* must be exactly four chars */

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]  = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
        fis[0] = 0x27;  /* Register - Host to Device FIS */
        fis[1] = (pmp & 0xf) | (1 << 7);        /* Port multiplier number,
                                                   bit 7 indicates Command FIS */
        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command = fis[2];   /* status */
        tf->feature = fis[3];   /* error */

        tf->lbal = fis[4];
        tf->lbam = fis[5];
        tf->lbah = fis[6];
        tf->device = fis[7];

        tf->hob_lbal = fis[8];
        tf->hob_lbam = fis[9];
        tf->hob_lbah = fis[10];

        tf->nsect = fis[12];
        tf->hob_nsect = fis[13];
}

static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};
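
/*
 * Editor's note (not in the original source): ata_rw_cmds[] is indexed
 * as index + fua + lba48 + write, where index picks the 8-entry group
 * (0 = PIO multi, 8 = PIO, 16 = DMA), fua adds 4, lba48 adds 2 and
 * write adds 1.  For example a DMA FUA write on an LBA48 device hits
 * entry 16 + 4 + 2 + 1 = 23, ATA_CMD_WRITE_FUA_EXT.  A zero entry means
 * that combination (e.g. FUA without LBA48) has no command.
 */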

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
        u64 block = 0;

        if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
                        block |= tf->hob_lbal << 24;
                } else
                        block |= (tf->device & 0xf) << 24;

                block |= tf->lbah << 16;
                block |= tf->lbam << 8;
                block |= tf->lbal;
        } else {
                u32 cyl, head, sect;

                cyl = tf->lbam | (tf->lbah << 8);
                head = tf->device & 0xf;
                sect = tf->lbal;

                block = (cyl * dev->heads + head) * dev->sectors + sect;
        }

        return block;
}
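
/*
 * Editor's note (not in the original source): CHS sector numbers are
 * 1-based, so one would expect "+ sect - 1" in the CHS branch above;
 * later kernels fixed exactly this off-by-one in ata_tf_read_block().
 * The code is shown here as it stood at this revision.
 */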

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                    u64 block, u32 n_block, unsigned int tf_flags,
                    unsigned int tag)
{
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf->flags |= tf_flags;

        if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
                /* yay, NCQ */
                if (!lba_48_ok(block, n_block))
                        return -ERANGE;

                tf->protocol = ATA_PROT_NCQ;
                tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

                if (tf->flags & ATA_TFLAG_WRITE)
                        tf->command = ATA_CMD_FPDMA_WRITE;
                else
                        tf->command = ATA_CMD_FPDMA_READ;

                tf->nsect = tag << 3;
                tf->hob_feature = (n_block >> 8) & 0xff;
                tf->feature = n_block & 0xff;

                tf->hob_lbah = (block >> 40) & 0xff;
                tf->hob_lbam = (block >> 32) & 0xff;
                tf->hob_lbal = (block >> 24) & 0xff;
                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device = 1 << 6;
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;
        } else if (dev->flags & ATA_DFLAG_LBA) {
                tf->flags |= ATA_TFLAG_LBA;

                if (lba_28_ok(block, n_block)) {
                        /* use LBA28 */
                        tf->device |= (block >> 24) & 0xf;
                } else if (lba_48_ok(block, n_block)) {
                        if (!(dev->flags & ATA_DFLAG_LBA48))
                                return -ERANGE;

                        /* use LBA48 */
                        tf->flags |= ATA_TFLAG_LBA48;

                        tf->hob_nsect = (n_block >> 8) & 0xff;

                        tf->hob_lbah = (block >> 40) & 0xff;
                        tf->hob_lbam = (block >> 32) & 0xff;
                        tf->hob_lbal = (block >> 24) & 0xff;
                } else
                        /* request too large even for LBA48 */
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                tf->nsect = n_block & 0xff;

                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device |= ATA_LBA;
        } else {
                /* CHS */
                u32 sect, head, cyl, track;

                /* The request -may- be too large for CHS addressing. */
                if (!lba_28_ok(block, n_block))
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                /* Convert LBA to CHS */
                track = (u32)block / dev->sectors;
                cyl   = track / dev->heads;
                head  = track % dev->heads;
                sect  = (u32)block % dev->sectors + 1;

                DPRINTK("block %u track %u cyl %u head %u sect %u\n",
                        (u32)block, track, cyl, head, sect);

                /* Check whether the converted CHS can fit.
                   Cylinder: 0-65535
                   Head: 0-15
                   Sector: 1-255 */
                if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
                        return -ERANGE;

                tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
                tf->lbal = sect;
                tf->lbam = cyl;
                tf->lbah = cyl >> 8;
                tf->device |= head;
        }

        return 0;
}
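
/*
 * Editor's note (not in the original source): for NCQ the sector count
 * register does not hold a count at all - it carries the queue tag in
 * bits 7:3 (hence "tf->nsect = tag << 3" above), while the transfer
 * length travels in the feature/hob_feature register pair instead.
 */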

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
                                      unsigned int mwdma_mask,
                                      unsigned int udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
                ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
                ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
                                unsigned int *pio_mask,
                                unsigned int *mwdma_mask,
                                unsigned int *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
        { -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return 1 << (ent->shift + xfer_mode - ent->base);
        return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}

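/*
 * Editor's note (not in the original source): an xfer_mask is one bitmap
 * with the PIO, MWDMA and UDMA bit ranges packed at their ATA_SHIFT_*
 * offsets, and the three helpers above translate between a mask and the
 * XFER_* mode numbers.  Only the highest set bit matters to
 * ata_xfer_mask2mode(): a device whose mask tops out at the UDMA/66 bit
 * maps to XFER_UDMA_4.
 */
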
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "PIO5",
                "PIO6",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "MWDMA3",
                "MWDMA4",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}

TH
585static const char *sata_spd_string(unsigned int spd)
586{
587 static const char * const spd_str[] = {
588 "1.5 Gbps",
589 "3.0 Gbps",
590 };
591
592 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
593 return "<unknown>";
594 return spd_str[spd - 1];
595}
596
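/*
 * Editor's note (not in the original source): @spd is the SPD field of
 * the SATA SStatus register (read via sata_scr_read() elsewhere in
 * libata), where 1 means 1.5 Gbps and 2 means 3.0 Gbps; anything else
 * reads back as "<unknown>".
 */
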
void ata_dev_disable(struct ata_device *dev)
{
        if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
                ata_dev_printk(dev, KERN_WARNING, "disabled\n");
                ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
                                             ATA_DNXFER_QUIET);
                dev->class++;
        }
}

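/*
 * Editor's note (not in the original source): "dev->class++" relies on
 * the ATA_DEV_* numbering, in which each supported class is followed
 * directly by its unsupported variant (ATA_DEV_ATA -> ATA_DEV_ATA_UNSUP,
 * ATA_DEV_ATAPI -> ATA_DEV_ATAPI_UNSUP), so the increment marks the
 * device unsupported while remembering what it was.
 */
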
/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        iowrite8(0xaa, ioaddr->nsect_addr);
        iowrite8(0x55, ioaddr->lbal_addr);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        nsect = ioread8(ioaddr->nsect_addr);
        lbal = ioread8(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * so we only check those.  It's sufficient for uniqueness.
         */

        if (((tf->lbam == 0) && (tf->lbah == 0)) ||
            ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
            ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
        struct ata_taskfile tf;
        unsigned int class;
        u8 err;

        ap->ops->dev_select(ap, device);

        memset(&tf, 0, sizeof(tf));

        ap->ops->tf_read(ap, &tf);
        err = tf.feature;
        if (r_err)
                *r_err = err;

        /* see if device passed diags: if master then continue and warn later */
        if (err == 0 && device == 0)
                /* diagnostic fail : do nothing _YET_ */
                ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
        else if (err == 1)
                /* do nothing */ ;
        else if ((device == 0) && (err == 0x81))
                /* do nothing */ ;
        else
                return ATA_DEV_NONE;

        /* determine if device is ATA or ATAPI */
        class = ata_dev_classify(&tf);

        if (class == ATA_DEV_UNKNOWN)
                return ATA_DEV_NONE;
        if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
                return ATA_DEV_NONE;
        return class;
}

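/*
 * Editor's note (not in the original source): the error register holds
 * the diagnostic result here.  Per the ATA spec, 0x01 means the device
 * passed, and 0x81 on device 0 means "master OK, slave failed", which
 * the code above deliberately tolerates; err == 0 on the master is a
 * diagnostic failure that is only recorded as a horkage for later.
 */
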
/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        WARN_ON(!(len & 1));

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, 100);
                else
                        return ata_id_u32(id, 60);
        } else {
                if (ata_id_current_chs_valid(id))
                        return ata_id_u32(id, 57);
                else
                        return id[1] * id[3] * id[6];
        }
}

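/*
 * Editor's note (not in the original source): the bare offsets above are
 * IDENTIFY word numbers - words 100-103 hold the LBA48 capacity, words
 * 60-61 the LBA28 capacity, words 57-58 the current CHS capacity and
 * words 1/3/6 the default cylinders/heads/sectors geometry.
 */
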
/**
 * ata_id_to_dma_mode - Identify DMA mode from id block
 * @dev: device to identify
 * @unknown: mode to assume if we cannot tell
 *
 * Set up the timing values for the device based upon the identify
 * reported values for the DMA mode.  This function is used by drivers
 * which rely upon firmware configured modes, but wish to report the
 * mode correctly when possible.
 *
 * In addition we emit similarly formatted messages to the default
 * ata_dev_set_mode handler, in order to provide consistency of
 * presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
        unsigned int mask;
        u8 mode;

        /* Pack the DMA modes */
        mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
        if (dev->id[53] & 0x04)
                mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

        /* Select the mode in use */
        mode = ata_xfer_mask2mode(mask);

        if (mode != 0) {
                ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
                               ata_mode_string(mask));
        } else {
                /* SWDMA perhaps ? */
                mode = unknown;
                ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
        }

        /* Configure the device reporting */
        dev->xfer_mode = mode;
        dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}

/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
        u8 tmp;

        if (device == 0)
                tmp = ATA_DEVICE_OBS;
        else
                tmp = ATA_DEVICE_OBS | ATA_DEV1;

        iowrite8(tmp, ap->ioaddr.device_addr);
        ata_pause(ap);  /* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
                    unsigned int wait, unsigned int can_sleep)
{
        if (ata_msg_probe(ap))
                ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
                                "device %u, wait %u\n", device, wait);

        if (wait)
                ata_wait_idle(ap);

        ap->ops->dev_select(ap, device);

        if (wait) {
                if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
                        msleep(150);
                ata_wait_idle(ap);
        }
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
        DPRINTK("49==0x%04x  "
                "53==0x%04x  "
                "63==0x%04x  "
                "64==0x%04x  "
                "75==0x%04x  \n",
                id[49],
                id[53],
                id[63],
                id[64],
                id[75]);
        DPRINTK("80==0x%04x  "
                "81==0x%04x  "
                "82==0x%04x  "
                "83==0x%04x  "
                "84==0x%04x  \n",
                id[80],
                id[81],
                id[82],
                id[83],
                id[84]);
        DPRINTK("88==0x%04x  "
                "93==0x%04x\n",
                id[88],
                id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
        unsigned int pio_mask, mwdma_mask, udma_mask;

        /* Usual case.  Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum. Turn it into
                 * a mask.
                 */
                u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
                if (mode < 5)   /* Valid PIO range */
                        pio_mask = (2 << mode) - 1;
                else
                        pio_mask = 1;

                /* But wait.. there's more. Design your standards by
                 * committee and you too can get a free iordy field to
                 * process. However it's the speeds not the modes that
                 * are supported... Note drivers using the timing API
                 * will get this right anyway
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        if (ata_id_is_cfa(id)) {
                /*
                 * Process compact flash extended modes
                 */
                int pio = id[163] & 0x7;
                int dma = (id[163] >> 3) & 7;

                if (pio)
                        pio_mask |= (1 << 5);
                if (pio > 1)
                        pio_mask |= (1 << 6);
                if (dma)
                        mwdma_mask |= (1 << 3);
                if (dma > 1)
                        mwdma_mask |= (1 << 4);
        }

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

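/*
 * Editor's note (not in the original source): IDENTIFY word 64 only
 * advertises PIO3/PIO4, so the valid-word-64 path above shifts those
 * two bits up by 3 and always ORs in 0x7, since every ATA device can do
 * PIO0-2.  A drive reporting 0x03 in word 64 thus ends up with
 * pio_mask 0x1f, i.e. PIO0 through PIO4.
 */
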
/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user(low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
                         unsigned long delay)
{
        int rc;

        if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
                return;

        PREPARE_DELAYED_WORK(&ap->port_task, fn);
        ap->port_task_data = data;

        rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

        /* rc == 0 means that another user is using port task */
        WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
        unsigned long flags;

        DPRINTK("ENTER\n");

        spin_lock_irqsave(ap->lock, flags);
        ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("flush #1\n");
        flush_workqueue(ata_wq);

        /*
         * At this point, if a task is running, it's guaranteed to see
         * the FLUSH flag; thus, it will never queue pio tasks again.
         * Cancel and flush.
         */
        if (!cancel_delayed_work(&ap->port_task)) {
                if (ata_msg_ctl(ap))
                        ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
                                        __FUNCTION__);
                flush_workqueue(ata_wq);
        }

        spin_lock_irqsave(ap->lock, flags);
        ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(ap->lock, flags);

        if (ata_msg_ctl(ap))
                ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sg: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
                              struct ata_taskfile *tf, const u8 *cdb,
                              int dma_dir, struct scatterlist *sg,
                              unsigned int n_elem)
{
        struct ata_port *ap = dev->ap;
        u8 command = tf->command;
        struct ata_queued_cmd *qc;
        unsigned int tag, preempted_tag;
        u32 preempted_sactive, preempted_qc_active;
        DECLARE_COMPLETION_ONSTACK(wait);
        unsigned long flags;
        unsigned int err_mask;
        int rc;

        spin_lock_irqsave(ap->lock, flags);

        /* no internal command while frozen */
        if (ap->pflags & ATA_PFLAG_FROZEN) {
                spin_unlock_irqrestore(ap->lock, flags);
                return AC_ERR_SYSTEM;
        }

        /* initialize internal qc */

        /* XXX: Tag 0 is used for drivers with legacy EH as some
         * drivers choke if any other tag is given.  This breaks
         * ata_tag_internal() test for those drivers.  Don't use new
         * EH stuff without converting to it.
         */
        if (ap->ops->error_handler)
                tag = ATA_TAG_INTERNAL;
        else
                tag = 0;

        if (test_and_set_bit(tag, &ap->qc_allocated))
                BUG();
        qc = __ata_qc_from_tag(ap, tag);

        qc->tag = tag;
        qc->scsicmd = NULL;
        qc->ap = ap;
        qc->dev = dev;
        ata_qc_reinit(qc);

        preempted_tag = ap->active_tag;
        preempted_sactive = ap->sactive;
        preempted_qc_active = ap->qc_active;
        ap->active_tag = ATA_TAG_POISON;
        ap->sactive = 0;
        ap->qc_active = 0;

        /* prepare & issue qc */
        qc->tf = *tf;
        if (cdb)
                memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
        qc->flags |= ATA_QCFLAG_RESULT_TF;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                unsigned int i, buflen = 0;

                for (i = 0; i < n_elem; i++)
                        buflen += sg[i].length;

                ata_sg_init(qc, sg, n_elem);
                qc->nbytes = buflen;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        ata_qc_issue(qc);

        spin_unlock_irqrestore(ap->lock, flags);

        rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

        ata_port_flush_task(ap);

        if (!rc) {
                spin_lock_irqsave(ap->lock, flags);

                /* We're racing with irq here.  If we lose, the
                 * following test prevents us from completing the qc
                 * twice.  If we win, the port is frozen and will be
                 * cleaned up by ->post_internal_cmd().
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask |= AC_ERR_TIMEOUT;

                        if (ap->ops->error_handler)
                                ata_port_freeze(ap);
                        else
                                ata_qc_complete(qc);

                        if (ata_msg_warn(ap))
                                ata_dev_printk(dev, KERN_WARNING,
                                               "qc timeout (cmd 0x%x)\n", command);
                }

                spin_unlock_irqrestore(ap->lock, flags);
        }

        /* do post_internal_cmd */
        if (ap->ops->post_internal_cmd)
                ap->ops->post_internal_cmd(qc);

        /* perform minimal error analysis */
        if (qc->flags & ATA_QCFLAG_FAILED) {
                if (qc->result_tf.command & (ATA_ERR | ATA_DF))
                        qc->err_mask |= AC_ERR_DEV;

                if (!qc->err_mask)
                        qc->err_mask |= AC_ERR_OTHER;

                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;
        }

        /* finish up */
        spin_lock_irqsave(ap->lock, flags);

        *tf = qc->result_tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);
        ap->active_tag = preempted_tag;
        ap->sactive = preempted_sactive;
        ap->qc_active = preempted_qc_active;

        /* XXX - Some LLDDs (sata_mv) disable port on command failure.
         * Until those drivers are fixed, we detect the condition
         * here, fail the command with AC_ERR_SYSTEM and reenable the
         * port.
         *
         * Note that this doesn't change any behavior as internal
         * command failure results in disabling the device in the
         * higher layer for LLDDs without new reset/EH callbacks.
         *
         * Kill the following code as soon as those drivers are fixed.
         */
        if (ap->flags & ATA_FLAG_DISABLED) {
                err_mask |= AC_ERR_SYSTEM;
                ata_port_probe(ap);
        }

        spin_unlock_irqrestore(ap->lock, flags);

        return err_mask;
}

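/*
 * Editor's note (not in the original source): the preempted_* shuffle
 * above lets an internal command borrow the port from whatever was
 * running - active_tag, sactive and qc_active are saved and zeroed for
 * the duration of the internal command, then restored before returning.
 */
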
/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
                           struct ata_taskfile *tf, const u8 *cdb,
                           int dma_dir, void *buf, unsigned int buflen)
{
        struct scatterlist *psg = NULL, sg;
        unsigned int n_elem = 0;

        if (dma_dir != DMA_NONE) {
                WARN_ON(!buf);
                sg_init_one(&sg, buf, buflen);
                psg = &sg;
                n_elem++;
        }

        return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

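/*
 * Editor's note (not in the original source): a typical caller fills a
 * taskfile and hands it to ata_exec_internal(), as the IDENTIFY path in
 * ata_dev_read_id() below does:
 *
 *      ata_tf_init(dev, &tf);
 *      tf.command = ATA_CMD_ID_ATA;
 *      tf.protocol = ATA_PROT_PIO;
 *      err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *                                   id, sizeof(id[0]) * ATA_ID_WORDS);
 */
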
/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);

        tf.command = cmd;
        tf.flags |= ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_NODATA;

        return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
        /* Controller doesn't support IORDY. Probably a pointless check
           as the caller should know this */
        if (adev->ap->flags & ATA_FLAG_NO_IORDY)
                return 0;
        /* PIO3 and higher it is mandatory */
        if (adev->pio_mode > XFER_PIO_2)
                return 1;
        /* We turn it on when possible */
        if (ata_id_has_iordy(adev->id))
                return 1;
        return 0;
}

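/*
 * Editor's note (not in the original source): IORDY is the PATA
 * flow-control line a device uses to stretch PIO cycles; the spec makes
 * it mandatory from PIO3 up, which is why any pio_mode above XFER_PIO_2
 * returns 1 unconditionally here.
 */
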
/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
        /* If we have no drive specific rule, then PIO 2 is non IORDY */
        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
                u16 pio = adev->id[ATA_ID_EIDE_PIO];
                /* Is the speed faster than the drive allows non IORDY ? */
                if (pio) {
                        /* This is cycle times not frequency - watch the logic! */
                        if (pio > 240)  /* PIO2 is 240nS per cycle */
                                return 3 << ATA_SHIFT_PIO;
                        return 7 << ATA_SHIFT_PIO;
                }
        }
        return 3 << ATA_SHIFT_PIO;
}

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
                    unsigned int flags, u16 *id)
{
        struct ata_port *ap = dev->ap;
        unsigned int class = *p_class;
        struct ata_taskfile tf;
        unsigned int err_mask = 0;
        const char *reason;
        int rc;

        if (ata_msg_ctl(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

        ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
        ata_tf_init(dev, &tf);

        switch (class) {
        case ATA_DEV_ATA:
                tf.command = ATA_CMD_ID_ATA;
                break;
        case ATA_DEV_ATAPI:
                tf.command = ATA_CMD_ID_ATAPI;
                break;
        default:
                rc = -ENODEV;
                reason = "unsupported class";
                goto err_out;
        }

        tf.protocol = ATA_PROT_PIO;

        /* Some devices choke if TF registers contain garbage.  Make
         * sure those are properly initialized.
         */
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

        /* Device presence detection is unreliable on some
         * controllers.  Always poll IDENTIFY if available.
         */
        tf.flags |= ATA_TFLAG_POLLING;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     id, sizeof(id[0]) * ATA_ID_WORDS);
        if (err_mask) {
                if (err_mask & AC_ERR_NODEV_HINT) {
                        DPRINTK("ata%u.%d: NODEV after polling detection\n",
                                ap->print_id, dev->devno);
                        return -ENOENT;
                }

                rc = -EIO;
                reason = "I/O error";
                goto err_out;
        }

        swap_buf_le16(id, ATA_ID_WORDS);

        /* sanity check */
        rc = -EINVAL;
        reason = "device reports illegal type";

        if (class == ATA_DEV_ATA) {
                if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
                        goto err_out;
        } else {
                if (ata_id_is_ata(id))
                        goto err_out;
        }

        if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
                /*
                 * The exact sequence expected by certain pre-ATA4 drives is:
                 * SRST RESET
                 * IDENTIFY
                 * INITIALIZE DEVICE PARAMETERS
                 * anything else..
                 * Some drives were very specific about that exact sequence.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(dev, id[3], id[6]);
                        if (err_mask) {
                                rc = -EIO;
                                reason = "INIT_DEV_PARAMS failed";
                                goto err_out;
                        }

                        /* current CHS translation info (id[53-58]) might be
                         * changed. reread the identify device info.
                         */
                        flags &= ~ATA_READID_POSTRESET;
                        goto retry;
                }
        }

        *p_class = class;

        return 0;

 err_out:
        if (ata_msg_warn(ap))
                ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
                               "(%s, err_mask=0x%x)\n", reason, err_mask);
        return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
        return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

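/*
 * Editor's note (not in the original source): ata_dev_knobble() spots a
 * PATA device behind a SATA bridge - the port has a SATA cable but the
 * IDENTIFY data does not claim SATA - so that ata_dev_configure() below
 * can apply the conservative bridge limits (UDMA5, 200 sectors).
 */
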
static void ata_dev_config_ncq(struct ata_device *dev,
                               char *desc, size_t desc_sz)
{
        struct ata_port *ap = dev->ap;
        int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

        if (!ata_id_has_ncq(dev->id)) {
                desc[0] = '\0';
                return;
        }
        if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
                snprintf(desc, desc_sz, "NCQ (not used)");
                return;
        }
        if (ap->flags & ATA_FLAG_NCQ) {
                hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
                dev->flags |= ATA_DFLAG_NCQ;
        }

        if (hdepth >= ddepth)
                snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
        else
                snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
        struct ata_port *ap = dev->ap;
        int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
        const u16 *id = dev->id;
        unsigned int xfer_mask;
        char revbuf[7];         /* XYZ-99\0 */
        char fwrevbuf[ATA_ID_FW_REV_LEN+1];
        char modelbuf[ATA_ID_PROD_LEN+1];
        int rc;

        if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
                ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
                               __FUNCTION__);
                return 0;
        }

        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

        /* set _SDD */
        rc = ata_acpi_push_id(ap, dev->devno);
        if (rc) {
                ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
                               rc);
        }

        /* retrieve and execute the ATA task file of _GTF */
        ata_acpi_exec_tfs(ap);

        /* print device capabilities */
        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG,
                               "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
                               "85:%04x 86:%04x 87:%04x 88:%04x\n",
                               __FUNCTION__,
                               id[49], id[82], id[83], id[84],
                               id[85], id[86], id[87], id[88]);

        /* initialize to-be-configured parameters */
        dev->flags &= ~ATA_DFLAG_CFG_MASK;
        dev->max_sectors = 0;
        dev->cdb_len = 0;
        dev->n_sectors = 0;
        dev->cylinders = 0;
        dev->heads = 0;
        dev->sectors = 0;

        /*
         * common ATA, ATAPI feature tests
         */

        /* find max transfer mode; for printk only */
        xfer_mask = ata_id_xfermask(id);

        if (ata_msg_probe(ap))
                ata_dump_id(id);

        /* ATA-specific feature tests */
        if (dev->class == ATA_DEV_ATA) {
                if (ata_id_is_cfa(id)) {
                        if (id[162] & 1) /* CPRM may make this media unusable */
                                ata_dev_printk(dev, KERN_WARNING,
                                               "supports DRM functions and may "
                                               "not be fully accessible.\n");
                        snprintf(revbuf, 7, "CFA");
                }
                else
                        snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

                dev->n_sectors = ata_id_n_sectors(id);

                /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
                ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
                                sizeof(fwrevbuf));

                ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
                                sizeof(modelbuf));

                if (dev->id[59] & 0x100)
                        dev->multi_count = dev->id[59] & 0xff;

                if (ata_id_has_lba(id)) {
                        const char *lba_desc;
                        char ncq_desc[20];

                        lba_desc = "LBA";
                        dev->flags |= ATA_DFLAG_LBA;
                        if (ata_id_has_lba48(id)) {
                                dev->flags |= ATA_DFLAG_LBA48;
                                lba_desc = "LBA48";

                                if (dev->n_sectors >= (1UL << 28) &&
                                    ata_id_has_flush_ext(id))
                                        dev->flags |= ATA_DFLAG_FLUSH_EXT;
                        }

                        /* config NCQ */
                        ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

                        /* print device info to dmesg */
                        if (ata_msg_drv(ap) && print_info) {
                                ata_dev_printk(dev, KERN_INFO,
                                               "%s: %s, %s, max %s\n",
                                               revbuf, modelbuf, fwrevbuf,
                                               ata_mode_string(xfer_mask));
                                ata_dev_printk(dev, KERN_INFO,
                                               "%Lu sectors, multi %u: %s %s\n",
                                               (unsigned long long)dev->n_sectors,
                                               dev->multi_count, lba_desc, ncq_desc);
                        }
                } else {
                        /* CHS */

                        /* Default translation */
                        dev->cylinders  = id[1];
                        dev->heads      = id[3];
                        dev->sectors    = id[6];

                        if (ata_id_current_chs_valid(id)) {
                                /* Current CHS translation is valid. */
                                dev->cylinders = id[54];
                                dev->heads     = id[55];
                                dev->sectors   = id[56];
                        }

                        /* print device info to dmesg */
                        if (ata_msg_drv(ap) && print_info) {
                                ata_dev_printk(dev, KERN_INFO,
                                               "%s: %s, %s, max %s\n",
                                               revbuf, modelbuf, fwrevbuf,
                                               ata_mode_string(xfer_mask));
                                ata_dev_printk(dev, KERN_INFO,
                                               "%Lu sectors, multi %u, CHS %u/%u/%u\n",
                                               (unsigned long long)dev->n_sectors,
                                               dev->multi_count, dev->cylinders,
                                               dev->heads, dev->sectors);
                        }
                }

                dev->cdb_len = 16;
        }

        /* ATAPI-specific feature tests */
        else if (dev->class == ATA_DEV_ATAPI) {
                char *cdb_intr_string = "";

                rc = atapi_cdb_len(id);
                if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
                        if (ata_msg_warn(ap))
                                ata_dev_printk(dev, KERN_WARNING,
                                               "unsupported CDB len\n");
                        rc = -EINVAL;
                        goto err_out_nosup;
                }
                dev->cdb_len = (unsigned int) rc;

                if (ata_id_cdb_intr(dev->id)) {
                        dev->flags |= ATA_DFLAG_CDB_INTR;
                        cdb_intr_string = ", CDB intr";
                }

                /* print device info to dmesg */
                if (ata_msg_drv(ap) && print_info)
                        ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
                                       ata_mode_string(xfer_mask),
                                       cdb_intr_string);
        }

        /* determine max_sectors */
        dev->max_sectors = ATA_MAX_SECTORS;
        if (dev->flags & ATA_DFLAG_LBA48)
                dev->max_sectors = ATA_MAX_SECTORS_LBA48;

        if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
                /* Let the user know. We don't want to disallow opens for
                   rescue purposes, or in case the vendor is just a blithering
                   idiot */
                if (print_info) {
                        ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
                        ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
                }
        }

        /* limit bridge transfers to udma5, 200 sectors */
        if (ata_dev_knobble(dev)) {
                if (ata_msg_drv(ap) && print_info)
                        ata_dev_printk(dev, KERN_INFO,
                                       "applying bridge limits\n");
                dev->udma_mask &= ATA_UDMA5;
                dev->max_sectors = ATA_MAX_SECTORS;
        }

        if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
                dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
                                         dev->max_sectors);

        /* limit ATAPI DMA to R/W commands only */
        if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
                dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;

        if (ap->ops->dev_config)
                ap->ops->dev_config(dev);

        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
                               __FUNCTION__, ata_chk_status(ap));
        return 0;

err_out_nosup:
        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG,
                               "%s: EXIT, err\n", __FUNCTION__);
        return rc;
}

be0d18df 1812/**
2e41e8e6 1813 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
1814 * @ap: port
1815 *
2e41e8e6 1816 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
1817 * detection.
1818 */
1819
1820int ata_cable_40wire(struct ata_port *ap)
1821{
1822 return ATA_CBL_PATA40;
1823}
1824
1825/**
2e41e8e6 1826 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
1827 * @ap: port
1828 *
2e41e8e6 1829 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
1830 * detection.
1831 */
1832
1833int ata_cable_80wire(struct ata_port *ap)
1834{
1835 return ATA_CBL_PATA80;
1836}
1837
1838/**
1839 * ata_cable_unknown - return unknown PATA cable.
1840 * @ap: port
1841 *
1842 * Helper method for drivers which have no PATA cable detection.
1843 */
1844
1845int ata_cable_unknown(struct ata_port *ap)
1846{
1847 return ATA_CBL_PATA_UNK;
1848}
1849
1850/**
1851 * ata_cable_sata - return SATA cable type
1852 * @ap: port
1853 *
1854 * Helper method for drivers which have SATA cables
1855 */
1856
1857int ata_cable_sata(struct ata_port *ap)
1858{
1859 return ATA_CBL_SATA;
1860}
1861
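/* Illustrative sketch (not from this file): an LLDD whose channel is
 * known to be wired with an 80-wire cable can simply plug one of the
 * helpers above into its (hypothetical) port operations,
 *
 *	static const struct ata_port_operations frob_pata_ops = {
 *		.cable_detect	= ata_cable_80wire,
 *		...
 *	};
 *
 * ata_bus_probe() below then calls ->cable_detect() once PDIAG- has
 * been released and records the result in ap->cbl.
 */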
1da177e4
LT
1862/**
1863 * ata_bus_probe - Reset and probe ATA bus
1864 * @ap: Bus to probe
1865 *
0cba632b
JG
1866 * Master ATA bus probing function. Initiates a hardware-dependent
1867 * bus reset, then attempts to identify any devices found on
1868 * the bus.
1869 *
1da177e4 1870 * LOCKING:
0cba632b 1871 * PCI/etc. bus probe sem.
1da177e4
LT
1872 *
1873 * RETURNS:
96072e69 1874 * Zero on success, negative errno otherwise.
1da177e4
LT
1875 */
1876
80289167 1877int ata_bus_probe(struct ata_port *ap)
1da177e4 1878{
28ca5c57 1879 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 1880 int tries[ATA_MAX_DEVICES];
4ae72a1e 1881 int i, rc;
e82cbdb9 1882 struct ata_device *dev;
1da177e4 1883
28ca5c57 1884 ata_port_probe(ap);
c19ba8af 1885
14d2bac1
TH
1886 for (i = 0; i < ATA_MAX_DEVICES; i++)
1887 tries[i] = ATA_PROBE_MAX_TRIES;
1888
1889 retry:
2044470c 1890 /* reset and determine device classes */
52783c5d 1891 ap->ops->phy_reset(ap);
2061a47a 1892
52783c5d
TH
1893 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1894 dev = &ap->device[i];
c19ba8af 1895
52783c5d
TH
1896 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1897 dev->class != ATA_DEV_UNKNOWN)
1898 classes[dev->devno] = dev->class;
1899 else
1900 classes[dev->devno] = ATA_DEV_NONE;
2044470c 1901
52783c5d 1902 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 1903 }
1da177e4 1904
52783c5d 1905 ata_port_probe(ap);
2044470c 1906
b6079ca4
AC
1907 /* after the reset the device state is PIO 0 and the controller
1908 state is undefined. Record the mode */
1909
1910 for (i = 0; i < ATA_MAX_DEVICES; i++)
1911 ap->device[i].pio_mode = XFER_PIO_0;
1912
f31f0cc2
JG
 1913 /* read IDENTIFY page and configure devices. We have to do the identify
 1914 sequence in reverse device order so that PDIAG- is released by
1915 the slave device */
1916
1917 for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
e82cbdb9 1918 dev = &ap->device[i];
28ca5c57 1919
ec573755
TH
1920 if (tries[i])
1921 dev->class = classes[i];
ffeae418 1922
14d2bac1 1923 if (!ata_dev_enabled(dev))
ffeae418 1924 continue;
ffeae418 1925
bff04647
TH
1926 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
1927 dev->id);
14d2bac1
TH
1928 if (rc)
1929 goto fail;
f31f0cc2
JG
1930 }
1931
be0d18df
AC
1932 /* Now ask for the cable type as PDIAG- should have been released */
1933 if (ap->ops->cable_detect)
1934 ap->cbl = ap->ops->cable_detect(ap);
1935
f31f0cc2
JG
1936 /* After the identify sequence we can now set up the devices. We do
1937 this in the normal order so that the user doesn't get confused */
1938
1939 for(i = 0; i < ATA_MAX_DEVICES; i++) {
1940 dev = &ap->device[i];
1941 if (!ata_dev_enabled(dev))
1942 continue;
14d2bac1 1943
efdaedc4
TH
1944 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1945 rc = ata_dev_configure(dev);
1946 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
1947 if (rc)
1948 goto fail;
1da177e4
LT
1949 }
1950
e82cbdb9 1951 /* configure transfer mode */
3adcebb2 1952 rc = ata_set_mode(ap, &dev);
4ae72a1e 1953 if (rc)
51713d35 1954 goto fail;
1da177e4 1955
e82cbdb9
TH
1956 for (i = 0; i < ATA_MAX_DEVICES; i++)
1957 if (ata_dev_enabled(&ap->device[i]))
1958 return 0;
1da177e4 1959
e82cbdb9
TH
1960 /* no device present, disable port */
1961 ata_port_disable(ap);
1da177e4 1962 ap->ops->port_disable(ap);
96072e69 1963 return -ENODEV;
14d2bac1
TH
1964
1965 fail:
4ae72a1e
TH
1966 tries[dev->devno]--;
1967
14d2bac1
TH
1968 switch (rc) {
1969 case -EINVAL:
4ae72a1e 1970 /* eeek, something went very wrong, give up */
14d2bac1
TH
1971 tries[dev->devno] = 0;
1972 break;
4ae72a1e
TH
1973
1974 case -ENODEV:
1975 /* give it just one more chance */
1976 tries[dev->devno] = min(tries[dev->devno], 1);
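 /* fall through: -ENODEV shares -EIO's last-chance handling */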
14d2bac1 1977 case -EIO:
4ae72a1e
TH
1978 if (tries[dev->devno] == 1) {
1979 /* This is the last chance, better to slow
1980 * down than lose it.
1981 */
1982 sata_down_spd_limit(ap);
1983 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
1984 }
14d2bac1
TH
1985 }
1986
4ae72a1e 1987 if (!tries[dev->devno])
3373efd8 1988 ata_dev_disable(dev);
ec573755 1989
14d2bac1 1990 goto retry;
1da177e4
LT
1991}
1992
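/* Worked example of the retry policy above, assuming a device starts
 * with ATA_PROBE_MAX_TRIES == 3: a first -EIO from IDENTIFY drops
 * tries to 2 and we simply retry; a second -EIO drops it to 1, the
 * last chance, so the link speed and transfer mode are lowered before
 * the final attempt.  -ENODEV jumps straight to that single last
 * chance, while -EINVAL gives up immediately.  A device whose counter
 * reaches zero is disabled via ata_dev_disable().
 */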
1993/**
0cba632b
JG
1994 * ata_port_probe - Mark port as enabled
1995 * @ap: Port for which we indicate enablement
1da177e4 1996 *
0cba632b
JG
1997 * Modify @ap data structure such that the system
1998 * thinks that the entire port is enabled.
1999 *
cca3974e 2000 * LOCKING: host lock, or some other form of
0cba632b 2001 * serialization.
1da177e4
LT
2002 */
2003
2004void ata_port_probe(struct ata_port *ap)
2005{
198e0fed 2006 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2007}
2008
3be680b7
TH
2009/**
2010 * sata_print_link_status - Print SATA link status
2011 * @ap: SATA port to printk link status about
2012 *
2013 * This function prints link speed and status of a SATA link.
2014 *
2015 * LOCKING:
2016 * None.
2017 */
43727fbc 2018void sata_print_link_status(struct ata_port *ap)
3be680b7 2019{
6d5f9732 2020 u32 sstatus, scontrol, tmp;
3be680b7 2021
81952c54 2022 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 2023 return;
81952c54 2024 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 2025
81952c54 2026 if (ata_port_online(ap)) {
3be680b7 2027 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
2028 ata_port_printk(ap, KERN_INFO,
2029 "SATA link up %s (SStatus %X SControl %X)\n",
2030 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2031 } else {
f15a1daf
TH
2032 ata_port_printk(ap, KERN_INFO,
2033 "SATA link down (SStatus %X SControl %X)\n",
2034 sstatus, scontrol);
3be680b7
TH
2035 }
2036}
2037
1da177e4 2038/**
780a87f7
JG
2039 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2040 * @ap: SATA port associated with target SATA PHY.
1da177e4 2041 *
780a87f7
JG
2042 * This function issues commands to standard SATA Sxxx
2043 * PHY registers, to wake up the phy (and device), and
2044 * clear any reset condition.
1da177e4
LT
2045 *
2046 * LOCKING:
0cba632b 2047 * PCI/etc. bus probe sem.
1da177e4
LT
2048 *
2049 */
2050void __sata_phy_reset(struct ata_port *ap)
2051{
2052 u32 sstatus;
2053 unsigned long timeout = jiffies + (HZ * 5);
2054
2055 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 2056 /* issue phy wake/reset */
81952c54 2057 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
2058 /* Couldn't find anything in SATA I/II specs, but
2059 * AHCI-1.1 10.4.2 says at least 1 ms. */
2060 mdelay(1);
1da177e4 2061 }
81952c54
TH
2062 /* phy wake/clear reset */
2063 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
2064
2065 /* wait for phy to become ready, if necessary */
2066 do {
2067 msleep(200);
81952c54 2068 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
2069 if ((sstatus & 0xf) != 1)
2070 break;
2071 } while (time_before(jiffies, timeout));
2072
3be680b7
TH
2073 /* print link status */
2074 sata_print_link_status(ap);
656563e3 2075
3be680b7 2076 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 2077 if (!ata_port_offline(ap))
1da177e4 2078 ata_port_probe(ap);
3be680b7 2079 else
1da177e4 2080 ata_port_disable(ap);
1da177e4 2081
198e0fed 2082 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2083 return;
2084
2085 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2086 ata_port_disable(ap);
2087 return;
2088 }
2089
2090 ap->cbl = ATA_CBL_SATA;
2091}
2092
2093/**
780a87f7
JG
2094 * sata_phy_reset - Reset SATA bus.
2095 * @ap: SATA port associated with target SATA PHY.
1da177e4 2096 *
780a87f7
JG
2097 * This function resets the SATA bus, and then probes
2098 * the bus for devices.
1da177e4
LT
2099 *
2100 * LOCKING:
0cba632b 2101 * PCI/etc. bus probe sem.
1da177e4
LT
2102 *
2103 */
2104void sata_phy_reset(struct ata_port *ap)
2105{
2106 __sata_phy_reset(ap);
198e0fed 2107 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2108 return;
2109 ata_bus_reset(ap);
2110}
2111
ebdfca6e
AC
2112/**
2113 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2114 * @adev: device
2115 *
 2116 * Obtain the other device on the same cable, or NULL if
 2117 * none is present.
2118 */
2e9edbf8 2119
3373efd8 2120struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2121{
3373efd8 2122 struct ata_port *ap = adev->ap;
ebdfca6e 2123 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2124 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2125 return NULL;
2126 return pair;
2127}
2128
1da177e4 2129/**
780a87f7
JG
2130 * ata_port_disable - Disable port.
2131 * @ap: Port to be disabled.
1da177e4 2132 *
780a87f7
JG
2133 * Modify @ap data structure such that the system
2134 * thinks that the entire port is disabled, and should
2135 * never attempt to probe or communicate with devices
2136 * on this port.
2137 *
cca3974e 2138 * LOCKING: host lock, or some other form of
780a87f7 2139 * serialization.
1da177e4
LT
2140 */
2141
2142void ata_port_disable(struct ata_port *ap)
2143{
2144 ap->device[0].class = ATA_DEV_NONE;
2145 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2146 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2147}
2148
1c3fae4d 2149/**
3c567b7d 2150 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2151 * @ap: Port to adjust SATA spd limit for
2152 *
2153 * Adjust SATA spd limit of @ap downward. Note that this
2154 * function only adjusts the limit. The change must be applied
3c567b7d 2155 * using sata_set_spd().
1c3fae4d
TH
2156 *
2157 * LOCKING:
2158 * Inherited from caller.
2159 *
2160 * RETURNS:
2161 * 0 on success, negative errno on failure
2162 */
3c567b7d 2163int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2164{
81952c54
TH
2165 u32 sstatus, spd, mask;
2166 int rc, highbit;
1c3fae4d 2167
81952c54
TH
2168 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2169 if (rc)
2170 return rc;
1c3fae4d
TH
2171
2172 mask = ap->sata_spd_limit;
2173 if (mask <= 1)
2174 return -EINVAL;
2175 highbit = fls(mask) - 1;
2176 mask &= ~(1 << highbit);
2177
81952c54 2178 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2179 if (spd <= 1)
2180 return -EINVAL;
2181 spd--;
2182 mask &= (1 << spd) - 1;
2183 if (!mask)
2184 return -EINVAL;
2185
2186 ap->sata_spd_limit = mask;
2187
f15a1daf
TH
2188 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2189 sata_spd_string(fls(mask)));
1c3fae4d
TH
2190
2191 return 0;
2192}
2193
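/* Worked example of the mask arithmetic above: with
 * ap->sata_spd_limit == 0x3 (Gen1 and Gen2 allowed) and SStatus
 * reporting the link at spd == 2 (3.0 Gbps), dropping the highest
 * allowed bit leaves mask == 0x1, and clamping to speeds below the
 * current one keeps mask == 0x1, so the new limit is 1.5 Gbps.  The
 * change only takes effect once sata_set_spd() writes it to SControl
 * and the link is reset.
 */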
3c567b7d 2194static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2195{
2196 u32 spd, limit;
2197
2198 if (ap->sata_spd_limit == UINT_MAX)
2199 limit = 0;
2200 else
2201 limit = fls(ap->sata_spd_limit);
2202
2203 spd = (*scontrol >> 4) & 0xf;
2204 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2205
2206 return spd != limit;
2207}
2208
2209/**
3c567b7d 2210 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2211 * @ap: Port in question
2212 *
2213 * Test whether the spd limit in SControl matches
2214 * @ap->sata_spd_limit. This function is used to determine
2215 * whether hardreset is necessary to apply SATA spd
2216 * configuration.
2217 *
2218 * LOCKING:
2219 * Inherited from caller.
2220 *
2221 * RETURNS:
2222 * 1 if SATA spd configuration is needed, 0 otherwise.
2223 */
3c567b7d 2224int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2225{
2226 u32 scontrol;
2227
81952c54 2228 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2229 return 0;
2230
3c567b7d 2231 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2232}
2233
2234/**
3c567b7d 2235 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2236 * @ap: Port to set SATA spd for
2237 *
2238 * Set SATA spd of @ap according to sata_spd_limit.
2239 *
2240 * LOCKING:
2241 * Inherited from caller.
2242 *
2243 * RETURNS:
2244 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2245 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2246 */
3c567b7d 2247int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2248{
2249 u32 scontrol;
81952c54 2250 int rc;
1c3fae4d 2251
81952c54
TH
2252 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2253 return rc;
1c3fae4d 2254
3c567b7d 2255 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2256 return 0;
2257
81952c54
TH
2258 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2259 return rc;
2260
1c3fae4d
TH
2261 return 1;
2262}
2263
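/* Sketch of what the helpers above write, assuming the usual SControl
 * layout (DET in bits 0-3, SPD in bits 4-7, IPM in bits 8-11): with
 * ap->sata_spd_limit == 0x1, limit == fls(0x1) == 1 and the SPD
 * nibble is rewritten as
 *
 *	*scontrol = (*scontrol & ~0xf0) | (1 << 4);
 *
 * capping the link at 1.5 Gbps on the next hardreset.
 */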
452503f9
AC
2264/*
2265 * This mode timing computation functionality is ported over from
2266 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2267 */
2268/*
b352e57d 2269 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2270 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2271 * for UDMA6, which is currently supported only by Maxtor drives.
2272 *
2273 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2274 */
2275
2276static const struct ata_timing ata_timing[] = {
2277
2278 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2279 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2280 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2281 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2282
b352e57d
AC
2283 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2284 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2285 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2286 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2287 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2288
2289/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2290
452503f9
AC
2291 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2292 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2293 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2294
452503f9
AC
2295 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2296 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2297 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2298
b352e57d
AC
2299 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2300 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2301 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2302 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2303
2304 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2305 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2306 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2307
2308/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2309
2310 { 0xFF }
2311};
2312
2313#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2314#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2315
2316static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2317{
2318 q->setup = EZ(t->setup * 1000, T);
2319 q->act8b = EZ(t->act8b * 1000, T);
2320 q->rec8b = EZ(t->rec8b * 1000, T);
2321 q->cyc8b = EZ(t->cyc8b * 1000, T);
2322 q->active = EZ(t->active * 1000, T);
2323 q->recover = EZ(t->recover * 1000, T);
2324 q->cycle = EZ(t->cycle * 1000, T);
2325 q->udma = EZ(t->udma * 1000, UT);
2326}
2327
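/* Worked example of the quantization above, assuming the caller
 * passes clock periods in picoseconds (e.g. T = 1000000000 / 33333,
 * roughly 30000 ps for a 33 MHz bus clock, as PATA drivers
 * conventionally do): an active time of 70 ns becomes
 * EZ(70 * 1000, 30000) == 3 clocks, i.e. values are always rounded
 * up so a timing is never violated, while zero fields stay zero
 * instead of being rounded up to one cycle.
 */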
2328void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2329 struct ata_timing *m, unsigned int what)
2330{
2331 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2332 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2333 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2334 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2335 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2336 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2337 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2338 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2339}
2340
2341static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2342{
2343 const struct ata_timing *t;
2344
2345 for (t = ata_timing; t->mode != speed; t++)
91190758 2346 if (t->mode == 0xFF)
452503f9 2347 return NULL;
2e9edbf8 2348 return t;
452503f9
AC
2349}
2350
2351int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2352 struct ata_timing *t, int T, int UT)
2353{
2354 const struct ata_timing *s;
2355 struct ata_timing p;
2356
2357 /*
2e9edbf8 2358 * Find the mode.
75b1f2f8 2359 */
452503f9
AC
2360
2361 if (!(s = ata_timing_find_mode(speed)))
2362 return -EINVAL;
2363
75b1f2f8
AL
2364 memcpy(t, s, sizeof(*s));
2365
452503f9
AC
2366 /*
2367 * If the drive is an EIDE drive, it can tell us it needs extended
2368 * PIO/MW_DMA cycle timing.
2369 */
2370
2371 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2372 memset(&p, 0, sizeof(p));
 2373 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2374 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2375 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
 2376 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2377 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2378 }
2379 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2380 }
2381
2382 /*
2383 * Convert the timing to bus clock counts.
2384 */
2385
75b1f2f8 2386 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2387
2388 /*
c893a3ae
RD
2389 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
 2390 * S.M.A.R.T. and some other commands. We have to ensure that the
 2391 * DMA cycle timing is no faster than the fastest PIO timing.
452503f9
AC
2392 */
2393
fd3367af 2394 if (speed > XFER_PIO_6) {
452503f9
AC
2395 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2396 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2397 }
2398
2399 /*
c893a3ae 2400 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2401 */
2402
2403 if (t->act8b + t->rec8b < t->cyc8b) {
2404 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2405 t->rec8b = t->cyc8b - t->act8b;
2406 }
2407
2408 if (t->active + t->recover < t->cycle) {
2409 t->active += (t->cycle - (t->active + t->recover)) / 2;
2410 t->recover = t->cycle - t->active;
2411 }
2412
2413 return 0;
2414}
2415
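/* Illustrative caller (a sketch, not code from this file): a PATA
 * driver's ->set_piomode() would typically do something like
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	(assumed 33 MHz clock, in ps)
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, T, T))
 *		program t.setup, t.active and t.recover into the chip;
 *
 * where the final programming step is entirely controller specific.
 */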
cf176e1a
TH
2416/**
2417 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2418 * @dev: Device to adjust xfer masks
458337db 2419 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2420 *
2421 * Adjust xfer masks of @dev downward. Note that this function
2422 * does not apply the change. Invoking ata_set_mode() afterwards
2423 * will apply the limit.
2424 *
2425 * LOCKING:
2426 * Inherited from caller.
2427 *
2428 * RETURNS:
2429 * 0 on success, negative errno on failure
2430 */
458337db 2431int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2432{
458337db
TH
2433 char buf[32];
2434 unsigned int orig_mask, xfer_mask;
2435 unsigned int pio_mask, mwdma_mask, udma_mask;
2436 int quiet, highbit;
cf176e1a 2437
458337db
TH
2438 quiet = !!(sel & ATA_DNXFER_QUIET);
2439 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2440
458337db
TH
2441 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2442 dev->mwdma_mask,
2443 dev->udma_mask);
2444 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2445
458337db
TH
2446 switch (sel) {
2447 case ATA_DNXFER_PIO:
2448 highbit = fls(pio_mask) - 1;
2449 pio_mask &= ~(1 << highbit);
2450 break;
2451
2452 case ATA_DNXFER_DMA:
2453 if (udma_mask) {
2454 highbit = fls(udma_mask) - 1;
2455 udma_mask &= ~(1 << highbit);
2456 if (!udma_mask)
2457 return -ENOENT;
2458 } else if (mwdma_mask) {
2459 highbit = fls(mwdma_mask) - 1;
2460 mwdma_mask &= ~(1 << highbit);
2461 if (!mwdma_mask)
2462 return -ENOENT;
2463 }
2464 break;
2465
2466 case ATA_DNXFER_40C:
2467 udma_mask &= ATA_UDMA_MASK_40C;
2468 break;
2469
2470 case ATA_DNXFER_FORCE_PIO0:
2471 pio_mask &= 1;
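 /* fall through: FORCE_PIO0 also clears the DMA masks below */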
2472 case ATA_DNXFER_FORCE_PIO:
2473 mwdma_mask = 0;
2474 udma_mask = 0;
2475 break;
2476
458337db
TH
2477 default:
2478 BUG();
2479 }
2480
2481 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2482
2483 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2484 return -ENOENT;
2485
2486 if (!quiet) {
2487 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2488 snprintf(buf, sizeof(buf), "%s:%s",
2489 ata_mode_string(xfer_mask),
2490 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2491 else
2492 snprintf(buf, sizeof(buf), "%s",
2493 ata_mode_string(xfer_mask));
2494
2495 ata_dev_printk(dev, KERN_WARNING,
2496 "limiting speed to %s\n", buf);
2497 }
cf176e1a
TH
2498
2499 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2500 &dev->udma_mask);
2501
cf176e1a 2502 return 0;
cf176e1a
TH
2503}
2504
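/* Example of the selectors above: with pio_mask == 0x1f (PIO0-4),
 * ATA_DNXFER_PIO clears the top bit leaving 0x0f (PIO0-3), while
 * ATA_DNXFER_FORCE_PIO0 reduces it to 0x01 and clears the MWDMA and
 * UDMA masks entirely.  OR-ing ATA_DNXFER_QUIET into @sel performs
 * the same limiting without the "limiting speed to ..." warning.
 */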
3373efd8 2505static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2506{
baa1e78a 2507 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2508 unsigned int err_mask;
2509 int rc;
1da177e4 2510
e8384607 2511 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2512 if (dev->xfer_shift == ATA_SHIFT_PIO)
2513 dev->flags |= ATA_DFLAG_PIO;
2514
3373efd8 2515 err_mask = ata_dev_set_xfermode(dev);
11750a40
AC
2516 /* Old CFA may refuse this command, which is just fine */
2517 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2518 err_mask &= ~AC_ERR_DEV;
2519
83206a29 2520 if (err_mask) {
f15a1daf
TH
2521 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2522 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2523 return -EIO;
2524 }
1da177e4 2525
baa1e78a 2526 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2527 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2528 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2529 if (rc)
83206a29 2530 return rc;
48a8a14f 2531
23e71c3d
TH
2532 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2533 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2534
f15a1daf
TH
2535 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2536 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2537 return 0;
1da177e4
LT
2538}
2539
1da177e4 2540/**
04351821 2541 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
1da177e4 2542 * @ap: port on which timings will be programmed
e82cbdb9 2543 * @r_failed_dev: out parameter for failed device
1da177e4 2544 *
04351821
AC
2545 * Standard implementation of the function used to tune and set
2546 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2547 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2548 * returned in @r_failed_dev.
780a87f7 2549 *
1da177e4 2550 * LOCKING:
0cba632b 2551 * PCI/etc. bus probe sem.
e82cbdb9
TH
2552 *
2553 * RETURNS:
2554 * 0 on success, negative errno otherwise
1da177e4 2555 */
04351821
AC
2556
2557int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2558{
e8e0619f 2559 struct ata_device *dev;
e82cbdb9 2560 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2561
3adcebb2 2562
a6d5a51c
TH
2563 /* step 1: calculate xfer_mask */
2564 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2565 unsigned int pio_mask, dma_mask;
a6d5a51c 2566
e8e0619f
TH
2567 dev = &ap->device[i];
2568
e1211e3f 2569 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2570 continue;
2571
3373efd8 2572 ata_dev_xfermask(dev);
1da177e4 2573
acf356b1
TH
2574 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2575 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2576 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2577 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2578
4f65977d 2579 found = 1;
5444a6f4
AC
2580 if (dev->dma_mode)
2581 used_dma = 1;
a6d5a51c 2582 }
4f65977d 2583 if (!found)
e82cbdb9 2584 goto out;
a6d5a51c
TH
2585
2586 /* step 2: always set host PIO timings */
e8e0619f
TH
2587 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2588 dev = &ap->device[i];
2589 if (!ata_dev_enabled(dev))
2590 continue;
2591
2592 if (!dev->pio_mode) {
f15a1daf 2593 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2594 rc = -EINVAL;
e82cbdb9 2595 goto out;
e8e0619f
TH
2596 }
2597
2598 dev->xfer_mode = dev->pio_mode;
2599 dev->xfer_shift = ATA_SHIFT_PIO;
2600 if (ap->ops->set_piomode)
2601 ap->ops->set_piomode(ap, dev);
2602 }
1da177e4 2603
a6d5a51c 2604 /* step 3: set host DMA timings */
e8e0619f
TH
2605 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2606 dev = &ap->device[i];
2607
2608 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2609 continue;
2610
2611 dev->xfer_mode = dev->dma_mode;
2612 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2613 if (ap->ops->set_dmamode)
2614 ap->ops->set_dmamode(ap, dev);
2615 }
1da177e4
LT
2616
2617 /* step 4: update devices' xfer mode */
83206a29 2618 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2619 dev = &ap->device[i];
1da177e4 2620
18d90deb 2621 /* don't update suspended devices' xfer mode */
02670bf3 2622 if (!ata_dev_ready(dev))
83206a29
TH
2623 continue;
2624
3373efd8 2625 rc = ata_dev_set_mode(dev);
5bbc53f4 2626 if (rc)
e82cbdb9 2627 goto out;
83206a29 2628 }
1da177e4 2629
e8e0619f
TH
2630 /* Record simplex status. If we selected DMA then the other
2631 * host channels are not permitted to do so.
5444a6f4 2632 */
cca3974e 2633 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2634 ap->host->simplex_claimed = ap;
5444a6f4 2635
e8e0619f 2636 /* step 5: chip-specific finalisation */
1da177e4
LT
2637 if (ap->ops->post_set_mode)
2638 ap->ops->post_set_mode(ap);
e82cbdb9
TH
2639 out:
2640 if (rc)
2641 *r_failed_dev = dev;
2642 return rc;
1da177e4
LT
2643}
2644
04351821
AC
2645/**
2646 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2647 * @ap: port on which timings will be programmed
 2648 * @r_failed_dev: out parameter for failed device
2649 *
2650 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2651 * ata_set_mode() fails, pointer to the failing device is
2652 * returned in @r_failed_dev.
2653 *
2654 * LOCKING:
2655 * PCI/etc. bus probe sem.
2656 *
2657 * RETURNS:
2658 * 0 on success, negative errno otherwise
2659 */
2660int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2661{
2662 /* has private set_mode? */
2663 if (ap->ops->set_mode)
2664 return ap->ops->set_mode(ap, r_failed_dev);
2665 return ata_do_set_mode(ap, r_failed_dev);
2666}
2667
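/* Sketch of a hypothetical LLDD override: a controller that must fix
 * things up around the standard sequence can wrap ata_do_set_mode()
 * instead of reimplementing it,
 *
 *	static int frob_set_mode(struct ata_port *ap,
 *				 struct ata_device **r_failed)
 *	{
 *		frob_unlock_timing_regs(ap);	(hypothetical helper)
 *		return ata_do_set_mode(ap, r_failed);
 *	}
 *
 * and point .set_mode at it in its ata_port_operations.
 */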
1fdffbce
JG
2668/**
2669 * ata_tf_to_host - issue ATA taskfile to host controller
2670 * @ap: port to which command is being issued
2671 * @tf: ATA taskfile register set
2672 *
2673 * Issues ATA taskfile register set to ATA host controller,
2674 * with proper synchronization with interrupt handler and
2675 * other threads.
2676 *
2677 * LOCKING:
cca3974e 2678 * spin_lock_irqsave(host lock)
1fdffbce
JG
2679 */
2680
2681static inline void ata_tf_to_host(struct ata_port *ap,
2682 const struct ata_taskfile *tf)
2683{
2684 ap->ops->tf_load(ap, tf);
2685 ap->ops->exec_command(ap, tf);
2686}
2687
1da177e4
LT
2688/**
2689 * ata_busy_sleep - sleep until BSY clears, or timeout
2690 * @ap: port containing status register to be polled
2691 * @tmout_pat: impatience timeout
2692 * @tmout: overall timeout
2693 *
780a87f7
JG
2694 * Sleep until ATA Status register bit BSY clears,
2695 * or a timeout occurs.
2696 *
d1adc1bb
TH
2697 * LOCKING:
2698 * Kernel thread context (may sleep).
2699 *
2700 * RETURNS:
2701 * 0 on success, -errno otherwise.
1da177e4 2702 */
d1adc1bb
TH
2703int ata_busy_sleep(struct ata_port *ap,
2704 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2705{
2706 unsigned long timer_start, timeout;
2707 u8 status;
2708
2709 status = ata_busy_wait(ap, ATA_BUSY, 300);
2710 timer_start = jiffies;
2711 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2712 while (status != 0xff && (status & ATA_BUSY) &&
2713 time_before(jiffies, timeout)) {
1da177e4
LT
2714 msleep(50);
2715 status = ata_busy_wait(ap, ATA_BUSY, 3);
2716 }
2717
d1adc1bb 2718 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2719 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2720 "port is slow to respond, please be patient "
2721 "(Status 0x%x)\n", status);
1da177e4
LT
2722
2723 timeout = timer_start + tmout;
d1adc1bb
TH
2724 while (status != 0xff && (status & ATA_BUSY) &&
2725 time_before(jiffies, timeout)) {
1da177e4
LT
2726 msleep(50);
2727 status = ata_chk_status(ap);
2728 }
2729
d1adc1bb
TH
2730 if (status == 0xff)
2731 return -ENODEV;
2732
1da177e4 2733 if (status & ATA_BUSY) {
f15a1daf 2734 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2735 "(%lu secs, Status 0x%x)\n",
2736 tmout / HZ, status);
d1adc1bb 2737 return -EBUSY;
1da177e4
LT
2738 }
2739
2740 return 0;
2741}
2742
2743static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2744{
2745 struct ata_ioports *ioaddr = &ap->ioaddr;
2746 unsigned int dev0 = devmask & (1 << 0);
2747 unsigned int dev1 = devmask & (1 << 1);
2748 unsigned long timeout;
2749
2750 /* if device 0 was found in ata_devchk, wait for its
2751 * BSY bit to clear
2752 */
2753 if (dev0)
2754 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2755
2756 /* if device 1 was found in ata_devchk, wait for
2757 * register access, then wait for BSY to clear
2758 */
2759 timeout = jiffies + ATA_TMOUT_BOOT;
2760 while (dev1) {
2761 u8 nsect, lbal;
2762
2763 ap->ops->dev_select(ap, 1);
0d5ff566
TH
2764 nsect = ioread8(ioaddr->nsect_addr);
2765 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
2766 if ((nsect == 1) && (lbal == 1))
2767 break;
2768 if (time_after(jiffies, timeout)) {
2769 dev1 = 0;
2770 break;
2771 }
2772 msleep(50); /* give drive a breather */
2773 }
2774 if (dev1)
2775 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2776
2777 /* is all this really necessary? */
2778 ap->ops->dev_select(ap, 0);
2779 if (dev1)
2780 ap->ops->dev_select(ap, 1);
2781 if (dev0)
2782 ap->ops->dev_select(ap, 0);
2783}
2784
1da177e4
LT
2785static unsigned int ata_bus_softreset(struct ata_port *ap,
2786 unsigned int devmask)
2787{
2788 struct ata_ioports *ioaddr = &ap->ioaddr;
2789
44877b4e 2790 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
2791
2792 /* software reset. causes dev0 to be selected */
0d5ff566
TH
2793 iowrite8(ap->ctl, ioaddr->ctl_addr);
2794 udelay(20); /* FIXME: flush */
2795 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2796 udelay(20); /* FIXME: flush */
2797 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2798
2799 /* spec mandates ">= 2ms" before checking status.
2800 * We wait 150ms, because that was the magic delay used for
2801 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2802 * between when the ATA command register is written, and then
2803 * status is checked. Because waiting for "a while" before
2804 * checking status is fine, post SRST, we perform this magic
2805 * delay here as well.
09c7ad79
AC
2806 *
 2807 * Old drivers/ide uses the 2 ms rule and then waits for ready
1da177e4
LT
2808 */
2809 msleep(150);
2810
2e9edbf8 2811 /* Before we perform post reset processing we want to see if
298a41ca
TH
2812 * the bus shows 0xFF because the odd clown forgets the D7
2813 * pulldown resistor.
2814 */
d1adc1bb
TH
2815 if (ata_check_status(ap) == 0xFF)
2816 return 0;
09c7ad79 2817
1da177e4
LT
2818 ata_bus_post_reset(ap, devmask);
2819
2820 return 0;
2821}
2822
2823/**
2824 * ata_bus_reset - reset host port and associated ATA channel
2825 * @ap: port to reset
2826 *
2827 * This is typically the first time we actually start issuing
2828 * commands to the ATA channel. We wait for BSY to clear, then
2829 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2830 * result. Determine what devices, if any, are on the channel
2831 * by looking at the device 0/1 error register. Look at the signature
2832 * stored in each device's taskfile registers, to determine if
2833 * the device is ATA or ATAPI.
2834 *
2835 * LOCKING:
0cba632b 2836 * PCI/etc. bus probe sem.
cca3974e 2837 * Obtains host lock.
1da177e4
LT
2838 *
2839 * SIDE EFFECTS:
198e0fed 2840 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2841 */
2842
2843void ata_bus_reset(struct ata_port *ap)
2844{
2845 struct ata_ioports *ioaddr = &ap->ioaddr;
2846 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2847 u8 err;
aec5c3c1 2848 unsigned int dev0, dev1 = 0, devmask = 0;
1da177e4 2849
44877b4e 2850 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
2851
2852 /* determine if device 0/1 are present */
2853 if (ap->flags & ATA_FLAG_SATA_RESET)
2854 dev0 = 1;
2855 else {
2856 dev0 = ata_devchk(ap, 0);
2857 if (slave_possible)
2858 dev1 = ata_devchk(ap, 1);
2859 }
2860
2861 if (dev0)
2862 devmask |= (1 << 0);
2863 if (dev1)
2864 devmask |= (1 << 1);
2865
2866 /* select device 0 again */
2867 ap->ops->dev_select(ap, 0);
2868
2869 /* issue bus reset */
2870 if (ap->flags & ATA_FLAG_SRST)
aec5c3c1
TH
2871 if (ata_bus_softreset(ap, devmask))
2872 goto err_out;
1da177e4
LT
2873
2874 /*
2875 * determine by signature whether we have ATA or ATAPI devices
2876 */
b4dc7623 2877 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 2878 if ((slave_possible) && (err != 0x81))
b4dc7623 2879 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
2880
2881 /* re-enable interrupts */
83625006 2882 ap->ops->irq_on(ap);
1da177e4
LT
2883
2884 /* is double-select really necessary? */
2885 if (ap->device[1].class != ATA_DEV_NONE)
2886 ap->ops->dev_select(ap, 1);
2887 if (ap->device[0].class != ATA_DEV_NONE)
2888 ap->ops->dev_select(ap, 0);
2889
2890 /* if no devices were detected, disable this port */
2891 if ((ap->device[0].class == ATA_DEV_NONE) &&
2892 (ap->device[1].class == ATA_DEV_NONE))
2893 goto err_out;
2894
2895 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2896 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 2897 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2898 }
2899
2900 DPRINTK("EXIT\n");
2901 return;
2902
2903err_out:
f15a1daf 2904 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
2905 ap->ops->port_disable(ap);
2906
2907 DPRINTK("EXIT\n");
2908}
2909
d7bb4cc7
TH
2910/**
2911 * sata_phy_debounce - debounce SATA phy status
2912 * @ap: ATA port to debounce SATA phy status for
 2913 * @params: timing parameters { interval, duration, timeout } in msec
2914 *
2915 * Make sure SStatus of @ap reaches stable state, determined by
2916 * holding the same value where DET is not 1 for @duration polled
 2917 * every @interval, before @timeout. The timeout constrains the
 2918 * beginning of the stable state. Because, after hot unplugging,
 2919 * DET gets stuck at 1 on some controllers, this function waits
 2920 * until the timeout expires and then returns 0 if DET is stable at 1.
2921 *
2922 * LOCKING:
2923 * Kernel thread context (may sleep)
2924 *
2925 * RETURNS:
2926 * 0 on success, -errno on failure.
2927 */
2928int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 2929{
d7bb4cc7
TH
2930 unsigned long interval_msec = params[0];
2931 unsigned long duration = params[1] * HZ / 1000;
2932 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2933 unsigned long last_jiffies;
2934 u32 last, cur;
2935 int rc;
2936
2937 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2938 return rc;
2939 cur &= 0xf;
2940
2941 last = cur;
2942 last_jiffies = jiffies;
2943
2944 while (1) {
2945 msleep(interval_msec);
2946 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2947 return rc;
2948 cur &= 0xf;
2949
2950 /* DET stable? */
2951 if (cur == last) {
2952 if (cur == 1 && time_before(jiffies, timeout))
2953 continue;
2954 if (time_after(jiffies, last_jiffies + duration))
2955 return 0;
2956 continue;
2957 }
2958
2959 /* unstable, start over */
2960 last = cur;
2961 last_jiffies = jiffies;
2962
2963 /* check timeout */
2964 if (time_after(jiffies, timeout))
2965 return -EBUSY;
2966 }
2967}
2968
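/* Concrete reading of @params, using a made-up triple { 50, 1000,
 * 5000 }: poll SStatus every 50 ms, declare the link stable once DET
 * has held the same non-1 value for 1000 ms, and fail with -EBUSY if
 * no such stable window has begun within 5000 ms -- except that DET
 * stuck at 1 is reported as success, per the comment above.
 */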
2969/**
2970 * sata_phy_resume - resume SATA phy
2971 * @ap: ATA port to resume SATA phy for
 2972 * @params: timing parameters { interval, duration, timeout } in msec
2973 *
2974 * Resume SATA phy of @ap and debounce it.
2975 *
2976 * LOCKING:
2977 * Kernel thread context (may sleep)
2978 *
2979 * RETURNS:
2980 * 0 on success, -errno on failure.
2981 */
2982int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2983{
2984 u32 scontrol;
81952c54
TH
2985 int rc;
2986
2987 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2988 return rc;
7a7921e8 2989
852ee16a 2990 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2991
2992 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2993 return rc;
7a7921e8 2994
d7bb4cc7
TH
2995 /* Some PHYs react badly if SStatus is pounded immediately
2996 * after resuming. Delay 200ms before debouncing.
2997 */
2998 msleep(200);
7a7921e8 2999
d7bb4cc7 3000 return sata_phy_debounce(ap, params);
7a7921e8
TH
3001}
3002
f5914a46
TH
3003static void ata_wait_spinup(struct ata_port *ap)
3004{
3005 struct ata_eh_context *ehc = &ap->eh_context;
3006 unsigned long end, secs;
3007 int rc;
3008
3009 /* first, debounce phy if SATA */
3010 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 3011 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
3012
3013 /* if debounced successfully and offline, no need to wait */
3014 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
3015 return;
3016 }
3017
3018 /* okay, let's give the drive time to spin up */
3019 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
3020 secs = ((end - jiffies) + HZ - 1) / HZ;
3021
3022 if (time_after(jiffies, end))
3023 return;
3024
3025 if (secs > 5)
3026 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
3027 "(%lu secs)\n", secs);
3028
3029 schedule_timeout_uninterruptible(end - jiffies);
3030}
3031
3032/**
3033 * ata_std_prereset - prepare for reset
3034 * @ap: ATA port to be reset
3035 *
3036 * @ap is about to be reset. Initialize it.
3037 *
3038 * LOCKING:
3039 * Kernel thread context (may sleep)
3040 *
3041 * RETURNS:
3042 * 0 on success, -errno otherwise.
3043 */
3044int ata_std_prereset(struct ata_port *ap)
3045{
3046 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 3047 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3048 int rc;
3049
28324304
TH
3050 /* handle link resume & hotplug spinup */
3051 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3052 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3053 ehc->i.action |= ATA_EH_HARDRESET;
3054
3055 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
3056 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
3057 ata_wait_spinup(ap);
f5914a46
TH
3058
3059 /* if we're about to do hardreset, nothing more to do */
3060 if (ehc->i.action & ATA_EH_HARDRESET)
3061 return 0;
3062
3063 /* if SATA, resume phy */
3064 if (ap->cbl == ATA_CBL_SATA) {
f5914a46
TH
3065 rc = sata_phy_resume(ap, timing);
3066 if (rc && rc != -EOPNOTSUPP) {
3067 /* phy resume failed */
3068 ata_port_printk(ap, KERN_WARNING, "failed to resume "
3069 "link for reset (errno=%d)\n", rc);
3070 return rc;
3071 }
3072 }
3073
3074 /* Wait for !BSY if the controller can wait for the first D2H
3075 * Reg FIS and we don't know that no device is attached.
3076 */
3077 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
3078 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
3079
3080 return 0;
3081}
3082
c2bd5804
TH
3083/**
3084 * ata_std_softreset - reset host port via ATA SRST
3085 * @ap: port to reset
c2bd5804
TH
3086 * @classes: resulting classes of attached devices
3087 *
52783c5d 3088 * Reset host port using ATA SRST.
c2bd5804
TH
3089 *
3090 * LOCKING:
3091 * Kernel thread context (may sleep)
3092 *
3093 * RETURNS:
3094 * 0 on success, -errno otherwise.
3095 */
2bf2cb26 3096int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
3097{
3098 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3099 unsigned int devmask = 0, err_mask;
3100 u8 err;
3101
3102 DPRINTK("ENTER\n");
3103
81952c54 3104 if (ata_port_offline(ap)) {
3a39746a
TH
3105 classes[0] = ATA_DEV_NONE;
3106 goto out;
3107 }
3108
c2bd5804
TH
3109 /* determine if device 0/1 are present */
3110 if (ata_devchk(ap, 0))
3111 devmask |= (1 << 0);
3112 if (slave_possible && ata_devchk(ap, 1))
3113 devmask |= (1 << 1);
3114
c2bd5804
TH
3115 /* select device 0 again */
3116 ap->ops->dev_select(ap, 0);
3117
3118 /* issue bus reset */
3119 DPRINTK("about to softreset, devmask=%x\n", devmask);
3120 err_mask = ata_bus_softreset(ap, devmask);
3121 if (err_mask) {
f15a1daf
TH
3122 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
3123 err_mask);
c2bd5804
TH
3124 return -EIO;
3125 }
3126
3127 /* determine by signature whether we have ATA or ATAPI devices */
3128 classes[0] = ata_dev_try_classify(ap, 0, &err);
3129 if (slave_possible && err != 0x81)
3130 classes[1] = ata_dev_try_classify(ap, 1, &err);
3131
3a39746a 3132 out:
c2bd5804
TH
3133 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3134 return 0;
3135}
3136
3137/**
b6103f6d 3138 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3139 * @ap: port to reset
b6103f6d 3140 * @timing: timing parameters { interval, duration, timeout } in msec
c2bd5804
TH
3141 *
3142 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3143 *
3144 * LOCKING:
3145 * Kernel thread context (may sleep)
3146 *
3147 * RETURNS:
3148 * 0 on success, -errno otherwise.
3149 */
b6103f6d 3150int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 3151{
852ee16a 3152 u32 scontrol;
81952c54 3153 int rc;
852ee16a 3154
c2bd5804
TH
3155 DPRINTK("ENTER\n");
3156
3c567b7d 3157 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3158 /* SATA spec says nothing about how to reconfigure
3159 * spd. To be on the safe side, turn off phy during
3160 * reconfiguration. This works for at least ICH7 AHCI
3161 * and Sil3124.
3162 */
81952c54 3163 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3164 goto out;
81952c54 3165
a34b6fc0 3166 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3167
3168 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3169 goto out;
1c3fae4d 3170
3c567b7d 3171 sata_set_spd(ap);
1c3fae4d
TH
3172 }
3173
3174 /* issue phy wake/reset */
81952c54 3175 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3176 goto out;
81952c54 3177
852ee16a 3178 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3179
3180 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3181 goto out;
c2bd5804 3182
1c3fae4d 3183 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3184 * 10.4.2 says at least 1 ms.
3185 */
3186 msleep(1);
3187
1c3fae4d 3188 /* bring phy back */
b6103f6d
TH
3189 rc = sata_phy_resume(ap, timing);
3190 out:
3191 DPRINTK("EXIT, rc=%d\n", rc);
3192 return rc;
3193}
3194
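/* SControl values used above, assuming the standard layout (DET in
 * bits 0-3, SPD in bits 4-7, IPM in bits 8-11): 0x304 sets DET=4 to
 * take the PHY offline while a new speed limit is programmed, 0x301
 * sets DET=1 to drive COMRESET on the wire, and sata_phy_resume()
 * finally writes DET=0 (0x300) to let the link renegotiate.  The
 * (scontrol & 0x0f0) masking preserves the SPD nibble in each step.
 */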
3195/**
3196 * sata_std_hardreset - reset host port via SATA phy reset
3197 * @ap: port to reset
3198 * @class: resulting class of attached device
3199 *
3200 * SATA phy-reset host port using DET bits of SControl register,
3201 * wait for !BSY and classify the attached device.
3202 *
3203 * LOCKING:
3204 * Kernel thread context (may sleep)
3205 *
3206 * RETURNS:
3207 * 0 on success, -errno otherwise.
3208 */
3209int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3210{
3211 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3212 int rc;
3213
3214 DPRINTK("ENTER\n");
3215
3216 /* do hardreset */
3217 rc = sata_port_hardreset(ap, timing);
3218 if (rc) {
3219 ata_port_printk(ap, KERN_ERR,
3220 "COMRESET failed (errno=%d)\n", rc);
3221 return rc;
3222 }
c2bd5804 3223
c2bd5804 3224 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3225 if (ata_port_offline(ap)) {
c2bd5804
TH
3226 *class = ATA_DEV_NONE;
3227 DPRINTK("EXIT, link offline\n");
3228 return 0;
3229 }
3230
34fee227
TH
3231 /* wait a while before checking status, see SRST for more info */
3232 msleep(150);
3233
c2bd5804 3234 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
3235 ata_port_printk(ap, KERN_ERR,
3236 "COMRESET failed (device not ready)\n");
c2bd5804
TH
3237 return -EIO;
3238 }
3239
3a39746a
TH
3240 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3241
c2bd5804
TH
3242 *class = ata_dev_try_classify(ap, 0, NULL);
3243
3244 DPRINTK("EXIT, class=%u\n", *class);
3245 return 0;
3246}
3247
3248/**
3249 * ata_std_postreset - standard postreset callback
3250 * @ap: the target ata_port
3251 * @classes: classes of attached devices
3252 *
3253 * This function is invoked after a successful reset. Note that
3254 * the device might have been reset more than once using
3255 * different reset methods before postreset is invoked.
c2bd5804 3256 *
c2bd5804
TH
3257 * LOCKING:
3258 * Kernel thread context (may sleep)
3259 */
3260void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3261{
dc2b3515
TH
3262 u32 serror;
3263
c2bd5804
TH
3264 DPRINTK("ENTER\n");
3265
c2bd5804 3266 /* print link status */
81952c54 3267 sata_print_link_status(ap);
c2bd5804 3268
dc2b3515
TH
3269 /* clear SError */
3270 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3271 sata_scr_write(ap, SCR_ERROR, serror);
3272
3a39746a 3273 /* re-enable interrupts */
83625006
AI
3274 if (!ap->ops->error_handler)
3275 ap->ops->irq_on(ap);
c2bd5804
TH
3276
3277 /* is double-select really necessary? */
3278 if (classes[0] != ATA_DEV_NONE)
3279 ap->ops->dev_select(ap, 1);
3280 if (classes[1] != ATA_DEV_NONE)
3281 ap->ops->dev_select(ap, 0);
3282
3a39746a
TH
3283 /* bail out if no device is present */
3284 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3285 DPRINTK("EXIT, no device\n");
3286 return;
3287 }
3288
3289 /* set up device control */
0d5ff566
TH
3290 if (ap->ioaddr.ctl_addr)
3291 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3292
3293 DPRINTK("EXIT\n");
3294}
3295
623a3128
TH
3296/**
3297 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3298 * @dev: device to compare against
3299 * @new_class: class of the new device
3300 * @new_id: IDENTIFY page of the new device
3301 *
3302 * Compare @new_class and @new_id against @dev and determine
3303 * whether @dev is the device indicated by @new_class and
3304 * @new_id.
3305 *
3306 * LOCKING:
3307 * None.
3308 *
3309 * RETURNS:
3310 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3311 */
3373efd8
TH
3312static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3313 const u16 *new_id)
623a3128
TH
3314{
3315 const u16 *old_id = dev->id;
a0cf733b
TH
3316 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3317 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3318 u64 new_n_sectors;
3319
3320 if (dev->class != new_class) {
f15a1daf
TH
3321 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3322 dev->class, new_class);
623a3128
TH
3323 return 0;
3324 }
3325
a0cf733b
TH
3326 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3327 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3328 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3329 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3330 new_n_sectors = ata_id_n_sectors(new_id);
3331
3332 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3333 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3334 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3335 return 0;
3336 }
3337
3338 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3339 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3340 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3341 return 0;
3342 }
3343
3344 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3345 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3346 "%llu != %llu\n",
3347 (unsigned long long)dev->n_sectors,
3348 (unsigned long long)new_n_sectors);
623a3128
TH
3349 return 0;
3350 }
3351
3352 return 1;
3353}
3354
3355/**
3356 * ata_dev_revalidate - Revalidate ATA device
623a3128 3357 * @dev: device to revalidate
bff04647 3358 * @readid_flags: read ID flags
623a3128
TH
3359 *
3360 * Re-read IDENTIFY page and make sure @dev is still attached to
3361 * the port.
3362 *
3363 * LOCKING:
3364 * Kernel thread context (may sleep)
3365 *
3366 * RETURNS:
3367 * 0 on success, negative errno otherwise
3368 */
bff04647 3369int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3370{
5eb45c02 3371 unsigned int class = dev->class;
f15a1daf 3372 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3373 int rc;
3374
5eb45c02
TH
3375 if (!ata_dev_enabled(dev)) {
3376 rc = -ENODEV;
3377 goto fail;
3378 }
623a3128 3379
fe635c7e 3380 /* read ID data */
bff04647 3381 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3382 if (rc)
3383 goto fail;
3384
3385 /* is the device still there? */
3373efd8 3386 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3387 rc = -ENODEV;
3388 goto fail;
3389 }
3390
fe635c7e 3391 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3392
3393 /* configure device according to the new ID */
efdaedc4 3394 rc = ata_dev_configure(dev);
5eb45c02
TH
3395 if (rc == 0)
3396 return 0;
623a3128
TH
3397
3398 fail:
f15a1daf 3399 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3400 return rc;
3401}
3402
6919a0a6
AC
3403struct ata_blacklist_entry {
3404 const char *model_num;
3405 const char *model_rev;
3406 unsigned long horkage;
3407};
3408
3409static const struct ata_blacklist_entry ata_device_blacklist [] = {
3410 /* Devices with DMA related problems under Linux */
3411 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3412 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3413 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3414 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3415 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3416 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3417 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3418 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3419 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3420 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3421 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3422 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3423 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3424 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3425 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3426 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3427 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3428 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3429 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3430 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3431 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3432 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3433 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3434 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3435 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3436 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3437 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3438 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3439 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3440
18d6e9d5 3441 /* Weird ATAPI devices */
6f23a31d
AL
3442 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
3443 ATA_HORKAGE_DMA_RW_ONLY },
18d6e9d5 3444
6919a0a6
AC
3445 /* Devices we expect to fail diagnostics */
3446
3447 /* Devices where NCQ should be avoided */
3448 /* NCQ is slow */
3449 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3450 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3451 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30
PR
3452 /* NCQ is broken */
3453 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
96442925
JA
3454 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3455 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
36e337d0
RH
3456 /* Blacklist entries taken from Silicon Image 3124/3132
3457 Windows driver .inf file - also several Linux problem reports */
3458 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3459 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3460 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6
AC
3461
3462 /* Devices with NCQ limits */
3463
3464 /* End Marker */
3465 { }
1da177e4 3466};
2e9edbf8 3467
6919a0a6 3468unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3469{
8bfa79fc
TH
3470 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3471 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3472 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3473
8bfa79fc
TH
3474 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3475 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3476
6919a0a6 3477 while (ad->model_num) {
8bfa79fc 3478 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3479 if (ad->model_rev == NULL)
3480 return ad->horkage;
8bfa79fc 3481 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3482 return ad->horkage;
f4b15fef 3483 }
6919a0a6 3484 ad++;
f4b15fef 3485 }
1da177e4
LT
3486 return 0;
3487}
3488
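/* Adding an entry is a matter of extending the table above; e.g. a
 * fictitious drive whose NCQ breaks only on one firmware revision
 * would be listed as
 *
 *	{ "FROBNITZ FX100",	"FW1.23",	ATA_HORKAGE_NONCQ },
 *
 * Matching is an exact strcmp() against the IDENTIFY product and
 * firmware revision strings, so partial model names do not match.
 */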
6919a0a6
AC
3489static int ata_dma_blacklisted(const struct ata_device *dev)
3490{
3491 /* We don't support polling DMA.
 3492 * Blacklist ATAPI devices with CDB-intr for DMA (they fall back to PIO)
 3493 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3494 */
3495 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3496 (dev->flags & ATA_DFLAG_CDB_INTR))
3497 return 1;
3498 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3499}
3500
a6d5a51c
TH
3501/**
3502 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3503 * @dev: Device to compute xfermask for
3504 *
acf356b1
TH
3505 * Compute supported xfermask of @dev and store it in
3506 * dev->*_mask. This function is responsible for applying all
3507 * known limits including host controller limits, device
3508 * blacklist, etc...
a6d5a51c
TH
3509 *
3510 * LOCKING:
3511 * None.
a6d5a51c 3512 */
3373efd8 3513static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3514{
3373efd8 3515 struct ata_port *ap = dev->ap;
cca3974e 3516 struct ata_host *host = ap->host;
a6d5a51c 3517 unsigned long xfer_mask;
1da177e4 3518
37deecb5 3519 /* controller modes available */
565083e1
TH
3520 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3521 ap->mwdma_mask, ap->udma_mask);
3522
8343f889 3523 /* drive modes available */
37deecb5
TH
3524 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3525 dev->mwdma_mask, dev->udma_mask);
3526 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3527
b352e57d
AC
3528 /*
3529 * CFA Advanced TrueIDE timings are not allowed on a shared
3530 * cable
3531 */
3532 if (ata_dev_pair(dev)) {
3533 /* No PIO5 or PIO6 */
3534 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3535 /* No MWDMA3 or MWDMA 4 */
3536 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3537 }
3538
37deecb5
TH
3539 if (ata_dma_blacklisted(dev)) {
3540 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3541 ata_dev_printk(dev, KERN_WARNING,
3542 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3543 }
a6d5a51c 3544
14d66ab7
PV
3545 if ((host->flags & ATA_HOST_SIMPLEX) &&
3546 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3547 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3548 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3549 "other device, disabling DMA\n");
5444a6f4 3550 }
565083e1 3551
e424675f
JG
3552 if (ap->flags & ATA_FLAG_NO_IORDY)
3553 xfer_mask &= ata_pio_mask_no_iordy(dev);
3554
5444a6f4 3555 if (ap->ops->mode_filter)
a76b62ca 3556 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 3557
8343f889
RH
3558 /* Apply cable rule here. Don't apply it early because when
3559 * we handle hot plug the cable type can itself change.
3560 * Check this last so that we know if the transfer rate was
3561 * solely limited by the cable.
 3562 * Cables reported host side as unknown or 80 wire are checked
 3563 * drive side as well. Cases where we know a 40-wire cable
 3564 * is safely usable at 80-wire speeds are not checked here.
3565 */
3566 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3567 /* UDMA/44 or higher would be available */
 3568 if ((ap->cbl == ATA_CBL_PATA40) ||
3569 (ata_drive_40wire(dev->id) &&
3570 (ap->cbl == ATA_CBL_PATA_UNK ||
3571 ap->cbl == ATA_CBL_PATA80))) {
3572 ata_dev_printk(dev, KERN_WARNING,
3573 "limited to UDMA/33 due to 40-wire cable\n");
3574 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3575 }
3576
565083e1
TH
3577 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3578 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3579}
3580
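/* The pack/unpack helpers used above treat the three per-type masks
 * as fields of one combined xfer_mask (bit positions given by
 * ATA_SHIFT_PIO, ATA_SHIFT_MWDMA and ATA_SHIFT_UDMA).  A minimal
 * sketch of the same pattern, e.g. to forbid UDMA for a device:
 *
 *	unsigned int xm = ata_pack_xfermask(dev->pio_mask,
 *					    dev->mwdma_mask,
 *					    dev->udma_mask);
 *	xm &= ~ATA_MASK_UDMA;
 *	ata_unpack_xfermask(xm, &dev->pio_mask,
 *			    &dev->mwdma_mask, &dev->udma_mask);
 */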
1da177e4
LT
3581/**
3582 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3583 * @dev: Device to which command will be sent
3584 *
780a87f7
JG
3585 * Issue SET FEATURES - XFER MODE command to device @dev
3586 * on port @ap.
3587 *
1da177e4 3588 * LOCKING:
0cba632b 3589 * PCI/etc. bus probe sem.
83206a29
TH
3590 *
3591 * RETURNS:
3592 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3593 */
3594
3373efd8 3595static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3596{
a0123703 3597 struct ata_taskfile tf;
83206a29 3598 unsigned int err_mask;
1da177e4
LT
3599
3600 /* set up set-features taskfile */
3601 DPRINTK("set features - xfer mode\n");
3602
3373efd8 3603 ata_tf_init(dev, &tf);
a0123703
TH
3604 tf.command = ATA_CMD_SET_FEATURES;
3605 tf.feature = SETFEATURES_XFER;
3606 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3607 tf.protocol = ATA_PROT_NODATA;
3608 tf.nsect = dev->xfer_mode;
1da177e4 3609
3373efd8 3610 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3611
83206a29
TH
3612 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3613 return err_mask;
1da177e4
LT
3614}
3615
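/*
 * Hypothetical caller sketch: force UDMA/5 and report failure.
 * XFER_UDMA_5 and ATA_SHIFT_UDMA come from <linux/ata.h>; real callers
 * let the mode-selection path pick dev->xfer_mode from the masks
 * computed by ata_dev_xfermask() instead of hard-coding it.
 */
static int example_force_udma5(struct ata_device *dev)
{
	unsigned int err_mask;

	dev->xfer_mode = XFER_UDMA_5;
	dev->xfer_shift = ATA_SHIFT_UDMA;
	err_mask = ata_dev_set_xfermode(dev);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR,
			       "failed to set xfermode (err_mask=0x%x)\n",
			       err_mask);
		return -EIO;
	}
	return 0;
}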
8bf62ece
AL
3616/**
3617 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3618 * @dev: Device to which command will be sent
e2a7f77a
RD
3619 * @heads: Number of heads (taskfile parameter)
3620 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3621 *
3622 * LOCKING:
6aff8f1f
TH
3623 * Kernel thread context (may sleep)
3624 *
3625 * RETURNS:
3626 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3627 */
3373efd8
TH
3628static unsigned int ata_dev_init_params(struct ata_device *dev,
3629 u16 heads, u16 sectors)
8bf62ece 3630{
a0123703 3631 struct ata_taskfile tf;
6aff8f1f 3632 unsigned int err_mask;
8bf62ece
AL
3633
3634 /* Number of sectors per track 1-255. Number of heads 1-16 */
3635 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3636 return AC_ERR_INVALID;
8bf62ece
AL
3637
3638 /* set up init dev params taskfile */
3639 DPRINTK("init dev params \n");
3640
3373efd8 3641 ata_tf_init(dev, &tf);
a0123703
TH
3642 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3643 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3644 tf.protocol = ATA_PROT_NODATA;
3645 tf.nsect = sectors;
3646 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3647
3373efd8 3648 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3649
6aff8f1f
TH
3650 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3651 return err_mask;
8bf62ece
AL
3652}
3653
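/*
 * Illustrative note: the probe path issues this with the geometry the
 * drive reported in its IDENTIFY data (heads = id[3], sectors per
 * track = id[6]); e.g. a legacy CHS drive claiming 16 heads and 63
 * sectors yields tf.nsect = 63 and tf.device |= 0x0f (16 - 1).
 */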
1da177e4 3654/**
0cba632b
JG
3655 * ata_sg_clean - Unmap DMA memory associated with command
3656 * @qc: Command containing DMA memory to be released
3657 *
3658 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3659 *
3660 * LOCKING:
cca3974e 3661 * spin_lock_irqsave(host lock)
1da177e4 3662 */
70e6ad0c 3663void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3664{
3665 struct ata_port *ap = qc->ap;
cedc9a47 3666 struct scatterlist *sg = qc->__sg;
1da177e4 3667 int dir = qc->dma_dir;
cedc9a47 3668 void *pad_buf = NULL;
1da177e4 3669
a4631474
TH
3670 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3671 WARN_ON(sg == NULL);
1da177e4
LT
3672
3673 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3674 WARN_ON(qc->n_elem > 1);
1da177e4 3675
2c13b7ce 3676 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3677
cedc9a47
JG
3678 /* if we padded the buffer out to 32-bit bound, and data
3679 * xfer direction is from-device, we must copy from the
3680 * pad buffer back into the supplied buffer
3681 */
3682 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3683 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3684
3685 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3686 if (qc->n_elem)
2f1f610b 3687 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3688 /* restore last sg */
3689 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3690 if (pad_buf) {
3691 struct scatterlist *psg = &qc->pad_sgent;
3692 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3693 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3694 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3695 }
3696 } else {
2e242fa9 3697 if (qc->n_elem)
2f1f610b 3698 dma_unmap_single(ap->dev,
e1410f2d
JG
3699 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3700 dir);
cedc9a47
JG
3701 /* restore sg */
3702 sg->length += qc->pad_len;
3703 if (pad_buf)
3704 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3705 pad_buf, qc->pad_len);
3706 }
1da177e4
LT
3707
3708 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 3709 qc->__sg = NULL;
1da177e4
LT
3710}
3711
3712/**
3713 * ata_fill_sg - Fill PCI IDE PRD table
3714 * @qc: Metadata associated with taskfile to be transferred
3715 *
780a87f7
JG
3716 * Fill PCI IDE PRD (scatter-gather) table with segments
3717 * associated with the current disk command.
3718 *
1da177e4 3719 * LOCKING:
cca3974e 3720 * spin_lock_irqsave(host lock)
1da177e4
LT
3721 *
3722 */
3723static void ata_fill_sg(struct ata_queued_cmd *qc)
3724{
1da177e4 3725 struct ata_port *ap = qc->ap;
cedc9a47
JG
3726 struct scatterlist *sg;
3727 unsigned int idx;
1da177e4 3728
a4631474 3729 WARN_ON(qc->__sg == NULL);
f131883e 3730 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3731
3732 idx = 0;
cedc9a47 3733 ata_for_each_sg(sg, qc) {
1da177e4
LT
3734 u32 addr, offset;
3735 u32 sg_len, len;
3736
3737 /* determine if physical DMA addr spans 64K boundary.
3738 * Note h/w doesn't support 64-bit, so we unconditionally
3739 * truncate dma_addr_t to u32.
3740 */
3741 addr = (u32) sg_dma_address(sg);
3742 sg_len = sg_dma_len(sg);
3743
3744 while (sg_len) {
3745 offset = addr & 0xffff;
3746 len = sg_len;
3747 if ((offset + sg_len) > 0x10000)
3748 len = 0x10000 - offset;
3749
3750 ap->prd[idx].addr = cpu_to_le32(addr);
3751 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3752 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3753
3754 idx++;
3755 sg_len -= len;
3756 addr += len;
3757 }
3758 }
3759
3760 if (idx)
3761 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3762}
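/*
 * Worked example (illustrative): a 10 KiB segment whose bus address is
 * 0x1fc00 crosses the 64 KiB boundary at 0x20000, so the loop above
 * emits two PRD entries:
 *
 *   PRD[n]   = { addr 0x1fc00, len 0x0400 }   up to the boundary
 *   PRD[n+1] = { addr 0x20000, len 0x2400 }   the remaining 9 KiB
 *
 * and the last entry written gets ATA_PRD_EOT set.
 */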
3763/**
3764 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3765 * @qc: Metadata associated with taskfile to check
3766 *
780a87f7
JG
3767 * Allow low-level driver to filter ATA PACKET commands, returning
3768 * a status indicating whether or not it is OK to use DMA for the
3769 * supplied PACKET command.
3770 *
1da177e4 3771 * LOCKING:
cca3974e 3772 * spin_lock_irqsave(host lock)
0cba632b 3773 *
1da177e4
LT
3774 * RETURNS: 0 when ATAPI DMA can be used
3775 * nonzero otherwise
3776 */
3777int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3778{
3779 struct ata_port *ap = qc->ap;
3780 int rc = 0; /* Assume ATAPI DMA is OK by default */
3781
6f23a31d
AL
3782 /* some drives can only do ATAPI DMA on read/write */
3783 if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
3784 struct scsi_cmnd *cmd = qc->scsicmd;
3785 u8 *scsicmd = cmd->cmnd;
3786
3787 switch (scsicmd[0]) {
3788 case READ_10:
3789 case WRITE_10:
3790 case READ_12:
3791 case WRITE_12:
3792 case READ_6:
3793 case WRITE_6:
3794 /* atapi dma maybe ok */
3795 break;
3796 default:
3797 /* turn off atapi dma */
3798 return 1;
3799 }
3800 }
3801
1da177e4
LT
3802 if (ap->ops->check_atapi_dma)
3803 rc = ap->ops->check_atapi_dma(qc);
3804
3805 return rc;
3806}
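/*
 * Illustrative note: ATA_HORKAGE_DMA_RW_ONLY is set via the device
 * blacklist for drives that only handle ATAPI DMA for plain READ/WRITE
 * CDBs; every other packet command is forced back to PIO by the early
 * return above.
 */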
3807/**
3808 * ata_qc_prep - Prepare taskfile for submission
3809 * @qc: Metadata associated with taskfile to be prepared
3810 *
780a87f7
JG
3811 * Prepare ATA taskfile for submission.
3812 *
1da177e4 3813 * LOCKING:
cca3974e 3814 * spin_lock_irqsave(host lock)
1da177e4
LT
3815 */
3816void ata_qc_prep(struct ata_queued_cmd *qc)
3817{
3818 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3819 return;
3820
3821 ata_fill_sg(qc);
3822}
3823
e46834cd
BK
3824void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3825
0cba632b
JG
3826/**
3827 * ata_sg_init_one - Associate command with memory buffer
3828 * @qc: Command to be associated
3829 * @buf: Memory buffer
3830 * @buflen: Length of memory buffer, in bytes.
3831 *
3832 * Initialize the data-related elements of queued_cmd @qc
3833 * to point to a single memory buffer, @buf of byte length @buflen.
3834 *
3835 * LOCKING:
cca3974e 3836 * spin_lock_irqsave(host lock)
0cba632b
JG
3837 */
3838
1da177e4
LT
3839void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3840{
1da177e4
LT
3841 qc->flags |= ATA_QCFLAG_SINGLE;
3842
cedc9a47 3843 qc->__sg = &qc->sgent;
1da177e4 3844 qc->n_elem = 1;
cedc9a47 3845 qc->orig_n_elem = 1;
1da177e4 3846 qc->buf_virt = buf;
233277ca 3847 qc->nbytes = buflen;
1da177e4 3848
61c0596c 3849 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3850}
3851
0cba632b
JG
3852/**
3853 * ata_sg_init - Associate command with scatter-gather table.
3854 * @qc: Command to be associated
3855 * @sg: Scatter-gather table.
3856 * @n_elem: Number of elements in s/g table.
3857 *
3858 * Initialize the data-related elements of queued_cmd @qc
3859 * to point to a scatter-gather table @sg, containing @n_elem
3860 * elements.
3861 *
3862 * LOCKING:
cca3974e 3863 * spin_lock_irqsave(host lock)
0cba632b
JG
3864 */
3865
1da177e4
LT
3866void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3867 unsigned int n_elem)
3868{
3869 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3870 qc->__sg = sg;
1da177e4 3871 qc->n_elem = n_elem;
cedc9a47 3872 qc->orig_n_elem = n_elem;
1da177e4
LT
3873}
3874
3875/**
0cba632b
JG
3876 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3877 * @qc: Command with memory buffer to be mapped.
3878 *
3879 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
3880 *
3881 * LOCKING:
cca3974e 3882 * spin_lock_irqsave(host lock)
1da177e4
LT
3883 *
3884 * RETURNS:
0cba632b 3885 * Zero on success, negative on error.
1da177e4
LT
3886 */
3887
3888static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3889{
3890 struct ata_port *ap = qc->ap;
3891 int dir = qc->dma_dir;
cedc9a47 3892 struct scatterlist *sg = qc->__sg;
1da177e4 3893 dma_addr_t dma_address;
2e242fa9 3894 int trim_sg = 0;
1da177e4 3895
cedc9a47
JG
3896 /* we must lengthen transfers to end on a 32-bit boundary */
3897 qc->pad_len = sg->length & 3;
3898 if (qc->pad_len) {
3899 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3900 struct scatterlist *psg = &qc->pad_sgent;
3901
a4631474 3902 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3903
3904 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3905
3906 if (qc->tf.flags & ATA_TFLAG_WRITE)
3907 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3908 qc->pad_len);
3909
3910 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3911 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3912 /* trim sg */
3913 sg->length -= qc->pad_len;
2e242fa9
TH
3914 if (sg->length == 0)
3915 trim_sg = 1;
cedc9a47
JG
3916
3917 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3918 sg->length, qc->pad_len);
3919 }
3920
2e242fa9
TH
3921 if (trim_sg) {
3922 qc->n_elem--;
e1410f2d
JG
3923 goto skip_map;
3924 }
3925
2f1f610b 3926 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 3927 sg->length, dir);
537a95d9
TH
3928 if (dma_mapping_error(dma_address)) {
3929 /* restore sg */
3930 sg->length += qc->pad_len;
1da177e4 3931 return -1;
537a95d9 3932 }
1da177e4
LT
3933
3934 sg_dma_address(sg) = dma_address;
32529e01 3935 sg_dma_len(sg) = sg->length;
1da177e4 3936
2e242fa9 3937skip_map:
1da177e4
LT
3938 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3939 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3940
3941 return 0;
3942}
3943
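/*
 * Worked example (illustrative): a 510-byte ATAPI buffer gives
 * pad_len = 510 & 3 = 2.  The sg entry is trimmed to 508 bytes and
 * the 4-byte pad element (qc->pad_sgent) carries the final two bytes,
 * so the engine transfers 508 + 4 bytes and still ends on a 32-bit
 * boundary.
 */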
3944/**
0cba632b
JG
3945 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3946 * @qc: Command with scatter-gather table to be mapped.
3947 *
3948 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
3949 *
3950 * LOCKING:
cca3974e 3951 * spin_lock_irqsave(host lock)
1da177e4
LT
3952 *
3953 * RETURNS:
0cba632b 3954 * Zero on success, negative on error.
1da177e4
LT
3955 *
3956 */
3957
3958static int ata_sg_setup(struct ata_queued_cmd *qc)
3959{
3960 struct ata_port *ap = qc->ap;
cedc9a47
JG
3961 struct scatterlist *sg = qc->__sg;
3962 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 3963 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 3964
44877b4e 3965 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 3966 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 3967
cedc9a47
JG
3968 /* we must lengthen transfers to end on a 32-bit boundary */
3969 qc->pad_len = lsg->length & 3;
3970 if (qc->pad_len) {
3971 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3972 struct scatterlist *psg = &qc->pad_sgent;
3973 unsigned int offset;
3974
a4631474 3975 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3976
3977 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3978
3979 /*
3980 * psg->page/offset are used to copy to-be-written
3981 * data in this function or read data in ata_sg_clean.
3982 */
3983 offset = lsg->offset + lsg->length - qc->pad_len;
3984 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3985 psg->offset = offset_in_page(offset);
3986
3987 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3988 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3989 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 3990 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3991 }
3992
3993 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3994 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3995 /* trim last sg */
3996 lsg->length -= qc->pad_len;
e1410f2d
JG
3997 if (lsg->length == 0)
3998 trim_sg = 1;
cedc9a47
JG
3999
4000 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4001 qc->n_elem - 1, lsg->length, qc->pad_len);
4002 }
4003
e1410f2d
JG
4004 pre_n_elem = qc->n_elem;
4005 if (trim_sg && pre_n_elem)
4006 pre_n_elem--;
4007
4008 if (!pre_n_elem) {
4009 n_elem = 0;
4010 goto skip_map;
4011 }
4012
1da177e4 4013 dir = qc->dma_dir;
2f1f610b 4014 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4015 if (n_elem < 1) {
4016 /* restore last sg */
4017 lsg->length += qc->pad_len;
1da177e4 4018 return -1;
537a95d9 4019 }
1da177e4
LT
4020
4021 DPRINTK("%d sg elements mapped\n", n_elem);
4022
e1410f2d 4023skip_map:
1da177e4
LT
4024 qc->n_elem = n_elem;
4025
4026 return 0;
4027}
4028
0baab86b 4029/**
c893a3ae 4030 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4031 * @buf: Buffer to swap
4032 * @buf_words: Number of 16-bit words in buffer.
4033 *
4034 * Swap halves of 16-bit words if needed to convert from
4035 * little-endian byte order to native cpu byte order, or
4036 * vice-versa.
4037 *
4038 * LOCKING:
6f0ef4fa 4039 * Inherited from caller.
0baab86b 4040 */
1da177e4
LT
4041void swap_buf_le16(u16 *buf, unsigned int buf_words)
4042{
4043#ifdef __BIG_ENDIAN
4044 unsigned int i;
4045
4046 for (i = 0; i < buf_words; i++)
4047 buf[i] = le16_to_cpu(buf[i]);
4048#endif /* __BIG_ENDIAN */
4049}
4050
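/*
 * Example (illustrative): IDENTIFY DEVICE data arrives as 256
 * little-endian 16-bit words, so the probe path runs
 * swap_buf_le16(id, ATA_ID_WORDS) to make id[] natively indexable on
 * big-endian hosts, while the #ifdef above keeps this a no-op on
 * little-endian ones.
 */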
6ae4cfb5 4051/**
0d5ff566 4052 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4053 * @adev: device to target
6ae4cfb5
AL
4054 * @buf: data buffer
4055 * @buflen: buffer length
344babaa 4056 * @write_data: read/write
6ae4cfb5
AL
4057 *
4058 * Transfer data from/to the device data register by PIO.
4059 *
4060 * LOCKING:
4061 * Inherited from caller.
6ae4cfb5 4062 */
0d5ff566
TH
4063void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4064 unsigned int buflen, int write_data)
1da177e4 4065{
a6b2c5d4 4066 struct ata_port *ap = adev->ap;
6ae4cfb5 4067 unsigned int words = buflen >> 1;
1da177e4 4068
6ae4cfb5 4069 /* Transfer multiple of 2 bytes */
1da177e4 4070 if (write_data)
0d5ff566 4071 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4072 else
0d5ff566 4073 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4074
4075 /* Transfer trailing 1 byte, if any. */
4076 if (unlikely(buflen & 0x01)) {
4077 u16 align_buf[1] = { 0 };
4078 unsigned char *trailing_buf = buf + buflen - 1;
4079
4080 if (write_data) {
4081 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4082 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4083 } else {
0d5ff566 4084 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4085 memcpy(trailing_buf, align_buf, 1);
4086 }
4087 }
1da177e4
LT
4088}
4089
75e99585 4090/**
0d5ff566 4091 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
4092 * @adev: device to target
4093 * @buf: data buffer
4094 * @buflen: buffer length
4095 * @write_data: read/write
4096 *
88574551 4097 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4098 * transfer with interrupts disabled.
4099 *
4100 * LOCKING:
4101 * Inherited from caller.
4102 */
0d5ff566
TH
4103void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4104 unsigned int buflen, int write_data)
75e99585
AC
4105{
4106 unsigned long flags;
4107 local_irq_save(flags);
0d5ff566 4108 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
4109 local_irq_restore(flags);
4110}
4111
4112
6ae4cfb5 4113/**
5a5dbd18 4114 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4115 * @qc: Command on going
4116 *
5a5dbd18 4117 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4118 *
4119 * LOCKING:
4120 * Inherited from caller.
4121 */
4122
1da177e4
LT
4123static void ata_pio_sector(struct ata_queued_cmd *qc)
4124{
4125 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4126 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4127 struct ata_port *ap = qc->ap;
4128 struct page *page;
4129 unsigned int offset;
4130 unsigned char *buf;
4131
5a5dbd18 4132 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4133 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4134
4135 page = sg[qc->cursg].page;
726f0785 4136 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4137
4138 /* get the current page and offset */
4139 page = nth_page(page, (offset >> PAGE_SHIFT));
4140 offset %= PAGE_SIZE;
4141
1da177e4
LT
4142 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4143
91b8b313
AL
4144 if (PageHighMem(page)) {
4145 unsigned long flags;
4146
a6b2c5d4 4147 /* FIXME: use a bounce buffer */
91b8b313
AL
4148 local_irq_save(flags);
4149 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4150
91b8b313 4151 /* do the actual data transfer */
5a5dbd18 4152 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4153
91b8b313
AL
4154 kunmap_atomic(buf, KM_IRQ0);
4155 local_irq_restore(flags);
4156 } else {
4157 buf = page_address(page);
5a5dbd18 4158 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4159 }
1da177e4 4160
5a5dbd18
ML
4161 qc->curbytes += qc->sect_size;
4162 qc->cursg_ofs += qc->sect_size;
1da177e4 4163
726f0785 4164 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4165 qc->cursg++;
4166 qc->cursg_ofs = 0;
4167 }
1da177e4 4168}
1da177e4 4169
07f6f7d0 4170/**
5a5dbd18 4171 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4172 * @qc: Command on going
4173 *
5a5dbd18 4174 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4175 * ATA device for the DRQ request.
4176 *
4177 * LOCKING:
4178 * Inherited from caller.
4179 */
1da177e4 4180
07f6f7d0
AL
4181static void ata_pio_sectors(struct ata_queued_cmd *qc)
4182{
4183 if (is_multi_taskfile(&qc->tf)) {
4184 /* READ/WRITE MULTIPLE */
4185 unsigned int nsect;
4186
587005de 4187 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4188
5a5dbd18 4189 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4190 qc->dev->multi_count);
07f6f7d0
AL
4191 while (nsect--)
4192 ata_pio_sector(qc);
4193 } else
4194 ata_pio_sector(qc);
4195}
4196
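/*
 * Worked example (illustrative): a READ MULTIPLE qc with
 * nbytes = 24576, curbytes = 16384, sect_size = 512 and
 * dev->multi_count = 16 moves min((24576 - 16384) / 512, 16) = 16
 * sectors in this one DRQ block.
 */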
c71c1857
AL
4197/**
4198 * atapi_send_cdb - Write CDB bytes to hardware
4199 * @ap: Port to which ATAPI device is attached.
4200 * @qc: Taskfile currently active
4201 *
4202 * When device has indicated its readiness to accept
4203 * a CDB, this function is called. Send the CDB.
4204 *
4205 * LOCKING:
4206 * Inherited from caller.
4207 */
4208
4209static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4210{
4211 /* send SCSI cdb */
4212 DPRINTK("send cdb\n");
db024d53 4213 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4214
a6b2c5d4 4215 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4216 ata_altstatus(ap); /* flush */
4217
4218 switch (qc->tf.protocol) {
4219 case ATA_PROT_ATAPI:
4220 ap->hsm_task_state = HSM_ST;
4221 break;
4222 case ATA_PROT_ATAPI_NODATA:
4223 ap->hsm_task_state = HSM_ST_LAST;
4224 break;
4225 case ATA_PROT_ATAPI_DMA:
4226 ap->hsm_task_state = HSM_ST_LAST;
4227 /* initiate bmdma */
4228 ap->ops->bmdma_start(qc);
4229 break;
4230 }
1da177e4
LT
4231}
4232
6ae4cfb5
AL
4233/**
4234 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4235 * @qc: Command on going
4236 * @bytes: number of bytes
4237 *
4238 * Transfer data from/to the ATAPI device.
4239 *
4240 * LOCKING:
4241 * Inherited from caller.
4242 *
4243 */
4244
1da177e4
LT
4245static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4246{
4247 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4248 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4249 struct ata_port *ap = qc->ap;
4250 struct page *page;
4251 unsigned char *buf;
4252 unsigned int offset, count;
4253
563a6e1f 4254 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4255 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4256
4257next_sg:
563a6e1f 4258 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4259 /*
563a6e1f
AL
4260 * The end of qc->sg is reached and the device expects
4261 * more data to transfer. In order not to overrun qc->sg
4262 * while still honoring the length in the byte count register,
4263 * - for reads, discard the trailing data from the device
4264 * - for writes, pad the device with zero data
4265 */
4266 u16 pad_buf[1] = { 0 };
4267 unsigned int words = bytes >> 1;
4268 unsigned int i;
4269
4270 if (words) /* warn if bytes > 1 */
f15a1daf
TH
4271 ata_dev_printk(qc->dev, KERN_WARNING,
4272 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4273
4274 for (i = 0; i < words; i++)
a6b2c5d4 4275 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4276
14be71f4 4277 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4278 return;
4279 }
4280
cedc9a47 4281 sg = &qc->__sg[qc->cursg];
1da177e4 4282
1da177e4
LT
4283 page = sg->page;
4284 offset = sg->offset + qc->cursg_ofs;
4285
4286 /* get the current page and offset */
4287 page = nth_page(page, (offset >> PAGE_SHIFT));
4288 offset %= PAGE_SIZE;
4289
6952df03 4290 /* don't overrun current sg */
32529e01 4291 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4292
4293 /* don't cross page boundaries */
4294 count = min(count, (unsigned int)PAGE_SIZE - offset);
4295
7282aa4b
AL
4296 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4297
91b8b313
AL
4298 if (PageHighMem(page)) {
4299 unsigned long flags;
4300
a6b2c5d4 4301 /* FIXME: use bounce buffer */
91b8b313
AL
4302 local_irq_save(flags);
4303 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4304
91b8b313 4305 /* do the actual data transfer */
a6b2c5d4 4306 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4307
91b8b313
AL
4308 kunmap_atomic(buf, KM_IRQ0);
4309 local_irq_restore(flags);
4310 } else {
4311 buf = page_address(page);
a6b2c5d4 4312 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4313 }
1da177e4
LT
4314
4315 bytes -= count;
4316 qc->curbytes += count;
4317 qc->cursg_ofs += count;
4318
32529e01 4319 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4320 qc->cursg++;
4321 qc->cursg_ofs = 0;
4322 }
4323
563a6e1f 4324 if (bytes)
1da177e4 4325 goto next_sg;
1da177e4
LT
4326}
4327
6ae4cfb5
AL
4328/**
4329 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4330 * @qc: Command on going
4331 *
4332 * Transfer data from/to the ATAPI device.
4333 *
4334 * LOCKING:
4335 * Inherited from caller.
6ae4cfb5
AL
4336 */
4337
1da177e4
LT
4338static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4339{
4340 struct ata_port *ap = qc->ap;
4341 struct ata_device *dev = qc->dev;
4342 unsigned int ireason, bc_lo, bc_hi, bytes;
4343 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4344
eec4c3f3
AL
4345 /* Abuse qc->result_tf for temp storage of intermediate TF
4346 * here to save some kernel stack usage.
4347 * For normal completion, qc->result_tf is not relevant. For
4348 * error, qc->result_tf is later overwritten by ata_qc_complete().
4349 * So, the correctness of qc->result_tf is not affected.
4350 */
4351 ap->ops->tf_read(ap, &qc->result_tf);
4352 ireason = qc->result_tf.nsect;
4353 bc_lo = qc->result_tf.lbam;
4354 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4355 bytes = (bc_hi << 8) | bc_lo;
4356
4357 /* shall be cleared to zero, indicating xfer of data */
4358 if (ireason & (1 << 0))
4359 goto err_out;
4360
4361 /* make sure transfer direction matches expected */
4362 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4363 if (do_write != i_write)
4364 goto err_out;
4365
44877b4e 4366 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4367
1da177e4
LT
4368 __atapi_pio_bytes(qc, bytes);
4369
4370 return;
4371
4372err_out:
f15a1daf 4373 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4374 qc->err_mask |= AC_ERR_HSM;
14be71f4 4375 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4376}
4377
4378/**
c234fb00
AL
4379 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4380 * @ap: the target ata_port
4381 * @qc: qc on going
1da177e4 4382 *
c234fb00
AL
4383 * RETURNS:
4384 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4385 */
c234fb00
AL
4386
4387static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4388{
c234fb00
AL
4389 if (qc->tf.flags & ATA_TFLAG_POLLING)
4390 return 1;
1da177e4 4391
c234fb00
AL
4392 if (ap->hsm_task_state == HSM_ST_FIRST) {
4393 if (qc->tf.protocol == ATA_PROT_PIO &&
4394 (qc->tf.flags & ATA_TFLAG_WRITE))
4395 return 1;
1da177e4 4396
c234fb00
AL
4397 if (is_atapi_taskfile(&qc->tf) &&
4398 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4399 return 1;
fe79e683
AL
4400 }
4401
c234fb00
AL
4402 return 0;
4403}
1da177e4 4404
c17ea20d
TH
4405/**
4406 * ata_hsm_qc_complete - finish a qc running on standard HSM
4407 * @qc: Command to complete
4408 * @in_wq: 1 if called from workqueue, 0 otherwise
4409 *
4410 * Finish @qc which is running on standard HSM.
4411 *
4412 * LOCKING:
cca3974e 4413 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4414 * Otherwise, none on entry and grabs host lock.
4415 */
4416static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4417{
4418 struct ata_port *ap = qc->ap;
4419 unsigned long flags;
4420
4421 if (ap->ops->error_handler) {
4422 if (in_wq) {
ba6a1308 4423 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4424
cca3974e
JG
4425 /* EH might have kicked in while host lock is
4426 * released.
c17ea20d
TH
4427 */
4428 qc = ata_qc_from_tag(ap, qc->tag);
4429 if (qc) {
4430 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4431 ap->ops->irq_on(ap);
c17ea20d
TH
4432 ata_qc_complete(qc);
4433 } else
4434 ata_port_freeze(ap);
4435 }
4436
ba6a1308 4437 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4438 } else {
4439 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4440 ata_qc_complete(qc);
4441 else
4442 ata_port_freeze(ap);
4443 }
4444 } else {
4445 if (in_wq) {
ba6a1308 4446 spin_lock_irqsave(ap->lock, flags);
83625006 4447 ap->ops->irq_on(ap);
c17ea20d 4448 ata_qc_complete(qc);
ba6a1308 4449 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4450 } else
4451 ata_qc_complete(qc);
4452 }
1da177e4 4453
c81e29b4 4454 ata_altstatus(ap); /* flush */
c17ea20d
TH
4455}
4456
bb5cb290
AL
4457/**
4458 * ata_hsm_move - move the HSM to the next state.
4459 * @ap: the target ata_port
4460 * @qc: qc on going
4461 * @status: current device status
4462 * @in_wq: 1 if called from workqueue, 0 otherwise
4463 *
4464 * RETURNS:
4465 * 1 when poll next status needed, 0 otherwise.
4466 */
9a1004d0
TH
4467int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4468 u8 status, int in_wq)
e2cec771 4469{
bb5cb290
AL
4470 unsigned long flags = 0;
4471 int poll_next;
4472
6912ccd5
AL
4473 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4474
bb5cb290
AL
4475 /* Make sure ata_qc_issue_prot() does not throw things
4476 * like DMA polling into the workqueue. Notice that
4477 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4478 */
c234fb00 4479 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4480
e2cec771 4481fsm_start:
999bb6f4 4482 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4483 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4484
e2cec771
AL
4485 switch (ap->hsm_task_state) {
4486 case HSM_ST_FIRST:
bb5cb290
AL
4487 /* Send first data block or PACKET CDB */
4488
4489 /* If polling, we will stay in the work queue after
4490 * sending the data. Otherwise, interrupt handler
4491 * takes over after sending the data.
4492 */
4493 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4494
e2cec771 4495 /* check device status */
3655d1d3
AL
4496 if (unlikely((status & ATA_DRQ) == 0)) {
4497 /* handle BSY=0, DRQ=0 as error */
4498 if (likely(status & (ATA_ERR | ATA_DF)))
4499 /* device stops HSM for abort/error */
4500 qc->err_mask |= AC_ERR_DEV;
4501 else
4502 /* HSM violation. Let EH handle this */
4503 qc->err_mask |= AC_ERR_HSM;
4504
14be71f4 4505 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4506 goto fsm_start;
1da177e4
LT
4507 }
4508
71601958
AL
4509 /* Device should not ask for data transfer (DRQ=1)
4510 * when it finds something wrong.
eee6c32f
AL
4511 * We ignore DRQ here and stop the HSM by
4512 * changing hsm_task_state to HSM_ST_ERR and
4513 * let the EH abort the command or reset the device.
71601958
AL
4514 */
4515 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4516 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4517 "error, dev_stat 0x%X\n", status);
3655d1d3 4518 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4519 ap->hsm_task_state = HSM_ST_ERR;
4520 goto fsm_start;
71601958 4521 }
1da177e4 4522
bb5cb290
AL
4523 /* Send the CDB (atapi) or the first data block (ata pio out).
4524 * During the state transition, interrupt handler shouldn't
4525 * be invoked before the data transfer is complete and
4526 * hsm_task_state is changed. Hence, the following locking.
4527 */
4528 if (in_wq)
ba6a1308 4529 spin_lock_irqsave(ap->lock, flags);
1da177e4 4530
bb5cb290
AL
4531 if (qc->tf.protocol == ATA_PROT_PIO) {
4532 /* PIO data out protocol.
4533 * send first data block.
4534 */
0565c26d 4535
bb5cb290
AL
4536 /* ata_pio_sectors() might change the state
4537 * to HSM_ST_LAST. so, the state is changed here
4538 * before ata_pio_sectors().
4539 */
4540 ap->hsm_task_state = HSM_ST;
4541 ata_pio_sectors(qc);
4542 ata_altstatus(ap); /* flush */
4543 } else
4544 /* send CDB */
4545 atapi_send_cdb(ap, qc);
4546
4547 if (in_wq)
ba6a1308 4548 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4549
4550 /* if polling, ata_pio_task() handles the rest.
4551 * otherwise, interrupt handler takes over from here.
4552 */
e2cec771 4553 break;
1c848984 4554
e2cec771
AL
4555 case HSM_ST:
4556 /* complete command or read/write the data register */
4557 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4558 /* ATAPI PIO protocol */
4559 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4560 /* No more data to transfer or device error.
4561 * Device error will be tagged in HSM_ST_LAST.
4562 */
e2cec771
AL
4563 ap->hsm_task_state = HSM_ST_LAST;
4564 goto fsm_start;
4565 }
1da177e4 4566
71601958
AL
4567 /* Device should not ask for data transfer (DRQ=1)
4568 * when it finds something wrong.
eee6c32f
AL
4569 * We ignore DRQ here and stop the HSM by
4570 * changing hsm_task_state to HSM_ST_ERR and
4571 * let the EH abort the command or reset the device.
71601958
AL
4572 */
4573 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4574 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4575 "device error, dev_stat 0x%X\n",
4576 status);
3655d1d3 4577 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4578 ap->hsm_task_state = HSM_ST_ERR;
4579 goto fsm_start;
71601958 4580 }
1da177e4 4581
e2cec771 4582 atapi_pio_bytes(qc);
7fb6ec28 4583
e2cec771
AL
4584 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4585 /* bad ireason reported by device */
4586 goto fsm_start;
1da177e4 4587
e2cec771
AL
4588 } else {
4589 /* ATA PIO protocol */
4590 if (unlikely((status & ATA_DRQ) == 0)) {
4591 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4592 if (likely(status & (ATA_ERR | ATA_DF)))
4593 /* device stops HSM for abort/error */
4594 qc->err_mask |= AC_ERR_DEV;
4595 else
55a8e2c8
TH
4596 /* HSM violation. Let EH handle this.
4597 * Phantom devices also trigger this
4598 * condition. Mark hint.
4599 */
4600 qc->err_mask |= AC_ERR_HSM |
4601 AC_ERR_NODEV_HINT;
3655d1d3 4602
e2cec771
AL
4603 ap->hsm_task_state = HSM_ST_ERR;
4604 goto fsm_start;
4605 }
1da177e4 4606
eee6c32f
AL
4607 /* For PIO reads, some devices may ask for
4608 * data transfer (DRQ=1) along with ERR=1.
4609 * We respect DRQ here and transfer one
4610 * block of junk data before changing the
4611 * hsm_task_state to HSM_ST_ERR.
4612 *
4613 * For PIO writes, ERR=1 DRQ=1 doesn't make
4614 * sense since the data block has been
4615 * transferred to the device.
71601958
AL
4616 */
4617 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4618 /* data might be corrupted */
4619 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4620
4621 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4622 ata_pio_sectors(qc);
4623 ata_altstatus(ap);
4624 status = ata_wait_idle(ap);
4625 }
4626
3655d1d3
AL
4627 if (status & (ATA_BUSY | ATA_DRQ))
4628 qc->err_mask |= AC_ERR_HSM;
4629
eee6c32f
AL
4630 /* ata_pio_sectors() might change the
4631 * state to HSM_ST_LAST. so, the state
4632 * is changed after ata_pio_sectors().
4633 */
4634 ap->hsm_task_state = HSM_ST_ERR;
4635 goto fsm_start;
71601958
AL
4636 }
4637
e2cec771
AL
4638 ata_pio_sectors(qc);
4639
4640 if (ap->hsm_task_state == HSM_ST_LAST &&
4641 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4642 /* all data read */
4643 ata_altstatus(ap);
52a32205 4644 status = ata_wait_idle(ap);
e2cec771
AL
4645 goto fsm_start;
4646 }
4647 }
4648
4649 ata_altstatus(ap); /* flush */
bb5cb290 4650 poll_next = 1;
1da177e4
LT
4651 break;
4652
14be71f4 4653 case HSM_ST_LAST:
6912ccd5
AL
4654 if (unlikely(!ata_ok(status))) {
4655 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4656 ap->hsm_task_state = HSM_ST_ERR;
4657 goto fsm_start;
4658 }
4659
4660 /* no more data to transfer */
4332a771 4661 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 4662 ap->print_id, qc->dev->devno, status);
e2cec771 4663
6912ccd5
AL
4664 WARN_ON(qc->err_mask);
4665
e2cec771 4666 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4667
e2cec771 4668 /* complete taskfile transaction */
c17ea20d 4669 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4670
4671 poll_next = 0;
1da177e4
LT
4672 break;
4673
14be71f4 4674 case HSM_ST_ERR:
e2cec771
AL
4675 /* make sure qc->err_mask is available to
4676 * know what's wrong and recover
4677 */
4678 WARN_ON(qc->err_mask == 0);
4679
4680 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4681
999bb6f4 4682 /* complete taskfile transaction */
c17ea20d 4683 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4684
4685 poll_next = 0;
e2cec771
AL
4686 break;
4687 default:
bb5cb290 4688 poll_next = 0;
6912ccd5 4689 BUG();
1da177e4
LT
4690 }
4691
bb5cb290 4692 return poll_next;
1da177e4
LT
4693}
4694
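/*
 * Summary sketch of the state flow handled above (happy path only):
 *
 *   PIO write / ATAPI:  HSM_ST_FIRST -> HSM_ST -> HSM_ST_LAST -> IDLE
 *   PIO read:           HSM_ST -> ... -> HSM_ST_LAST -> IDLE
 *
 * Any BSY/DRQ/ERR inconsistency detours through HSM_ST_ERR, which
 * completes the qc with err_mask set so EH can recover the port.
 */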
65f27f38 4695static void ata_pio_task(struct work_struct *work)
8061f5f0 4696{
65f27f38
DH
4697 struct ata_port *ap =
4698 container_of(work, struct ata_port, port_task.work);
4699 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4700 u8 status;
a1af3734 4701 int poll_next;
8061f5f0 4702
7fb6ec28 4703fsm_start:
a1af3734 4704 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4705
a1af3734
AL
4706 /*
4707 * This is purely heuristic. This is a fast path.
4708 * Sometimes when we enter, BSY will be cleared in
4709 * a chk-status or two. If not, the drive is probably seeking
4710 * or something. Snooze for a couple msecs, then
4711 * chk-status again. If still busy, queue delayed work.
4712 */
4713 status = ata_busy_wait(ap, ATA_BUSY, 5);
4714 if (status & ATA_BUSY) {
4715 msleep(2);
4716 status = ata_busy_wait(ap, ATA_BUSY, 10);
4717 if (status & ATA_BUSY) {
31ce6dae 4718 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4719 return;
4720 }
8061f5f0
TH
4721 }
4722
a1af3734
AL
4723 /* move the HSM */
4724 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4725
a1af3734
AL
4726 /* another command or interrupt handler
4727 * may be running at this point.
4728 */
4729 if (poll_next)
7fb6ec28 4730 goto fsm_start;
8061f5f0
TH
4731}
4732
1da177e4
LT
4733/**
4734 * ata_qc_new - Request an available ATA command, for queueing
4735 * @ap: Port associated with device @dev
4736 * @dev: Device from whom we request an available command structure
4737 *
4738 * LOCKING:
0cba632b 4739 * None.
1da177e4
LT
4740 */
4741
4742static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4743{
4744 struct ata_queued_cmd *qc = NULL;
4745 unsigned int i;
4746
e3180499 4747 /* no command while frozen */
b51e9e5d 4748 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4749 return NULL;
4750
2ab7db1f
TH
4751 /* the last tag is reserved for internal command. */
4752 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4753 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4754 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4755 break;
4756 }
4757
4758 if (qc)
4759 qc->tag = i;
4760
4761 return qc;
4762}
4763
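/*
 * Illustrative note: ap->qc_allocated is a bitmap of live tags, so
 * test_and_set_bit() below gives atomic, lock-free tag allocation.
 * Tag ATA_MAX_QUEUE - 1 is skipped because ata_exec_internal()
 * reserves it for internal commands.
 */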
4764/**
4765 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4766 * @dev: Device from whom we request an available command structure
4767 *
4768 * LOCKING:
0cba632b 4769 * None.
1da177e4
LT
4770 */
4771
3373efd8 4772struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4773{
3373efd8 4774 struct ata_port *ap = dev->ap;
1da177e4
LT
4775 struct ata_queued_cmd *qc;
4776
4777 qc = ata_qc_new(ap);
4778 if (qc) {
1da177e4
LT
4779 qc->scsicmd = NULL;
4780 qc->ap = ap;
4781 qc->dev = dev;
1da177e4 4782
2c13b7ce 4783 ata_qc_reinit(qc);
1da177e4
LT
4784 }
4785
4786 return qc;
4787}
4788
1da177e4
LT
4789/**
4790 * ata_qc_free - free unused ata_queued_cmd
4791 * @qc: Command to complete
4792 *
4793 * Designed to free unused ata_queued_cmd object
4794 * in case something prevents using it.
4795 *
4796 * LOCKING:
cca3974e 4797 * spin_lock_irqsave(host lock)
1da177e4
LT
4798 */
4799void ata_qc_free(struct ata_queued_cmd *qc)
4800{
4ba946e9
TH
4801 struct ata_port *ap = qc->ap;
4802 unsigned int tag;
4803
a4631474 4804 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4805
4ba946e9
TH
4806 qc->flags = 0;
4807 tag = qc->tag;
4808 if (likely(ata_tag_valid(tag))) {
4ba946e9 4809 qc->tag = ATA_TAG_POISON;
6cec4a39 4810 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4811 }
1da177e4
LT
4812}
4813
76014427 4814void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4815{
dedaf2b0
TH
4816 struct ata_port *ap = qc->ap;
4817
a4631474
TH
4818 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4819 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4820
4821 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4822 ata_sg_clean(qc);
4823
7401abf2 4824 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4825 if (qc->tf.protocol == ATA_PROT_NCQ)
4826 ap->sactive &= ~(1 << qc->tag);
4827 else
4828 ap->active_tag = ATA_TAG_POISON;
7401abf2 4829
3f3791d3
AL
4830 /* atapi: mark qc as inactive to prevent the interrupt handler
4831 * from completing the command twice later, before the error handler
4832 * is called. (when rc != 0 and atapi request sense is needed)
4833 */
4834 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4835 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4836
1da177e4 4837 /* call completion callback */
77853bf2 4838 qc->complete_fn(qc);
1da177e4
LT
4839}
4840
39599a53
TH
4841static void fill_result_tf(struct ata_queued_cmd *qc)
4842{
4843 struct ata_port *ap = qc->ap;
4844
39599a53 4845 qc->result_tf.flags = qc->tf.flags;
4742d54f 4846 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
4847}
4848
f686bcb8
TH
4849/**
4850 * ata_qc_complete - Complete an active ATA command
4851 * @qc: Command to complete
4853 *
4854 * Indicate to the mid and upper layers that an ATA
4855 * command has completed, with either an ok or not-ok status.
4856 *
4857 * LOCKING:
cca3974e 4858 * spin_lock_irqsave(host lock)
f686bcb8
TH
4859 */
4860void ata_qc_complete(struct ata_queued_cmd *qc)
4861{
4862 struct ata_port *ap = qc->ap;
4863
4864 /* XXX: New EH and old EH use different mechanisms to
4865 * synchronize EH with regular execution path.
4866 *
4867 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4868 * Normal execution path is responsible for not accessing a
4869 * failed qc. libata core enforces the rule by returning NULL
4870 * from ata_qc_from_tag() for failed qcs.
4871 *
4872 * Old EH depends on ata_qc_complete() nullifying completion
4873 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4874 * not synchronize with interrupt handler. Only PIO task is
4875 * taken care of.
4876 */
4877 if (ap->ops->error_handler) {
b51e9e5d 4878 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4879
4880 if (unlikely(qc->err_mask))
4881 qc->flags |= ATA_QCFLAG_FAILED;
4882
4883 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4884 if (!ata_tag_internal(qc->tag)) {
4885 /* always fill result TF for failed qc */
39599a53 4886 fill_result_tf(qc);
f686bcb8
TH
4887 ata_qc_schedule_eh(qc);
4888 return;
4889 }
4890 }
4891
4892 /* read result TF if requested */
4893 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4894 fill_result_tf(qc);
f686bcb8
TH
4895
4896 __ata_qc_complete(qc);
4897 } else {
4898 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4899 return;
4900
4901 /* read result TF if failed or requested */
4902 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4903 fill_result_tf(qc);
f686bcb8
TH
4904
4905 __ata_qc_complete(qc);
4906 }
4907}
4908
dedaf2b0
TH
4909/**
4910 * ata_qc_complete_multiple - Complete multiple qcs successfully
4911 * @ap: port in question
4912 * @qc_active: new qc_active mask
4913 * @finish_qc: LLDD callback invoked before completing a qc
4914 *
4915 * Complete in-flight commands. This function is meant to be
4916 * called from a low-level driver's interrupt routine to complete
4917 * requests normally. ap->qc_active and @qc_active are compared
4918 * and commands are completed accordingly.
4919 *
4920 * LOCKING:
cca3974e 4921 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4922 *
4923 * RETURNS:
4924 * Number of completed commands on success, -errno otherwise.
4925 */
4926int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4927 void (*finish_qc)(struct ata_queued_cmd *))
4928{
4929 int nr_done = 0;
4930 u32 done_mask;
4931 int i;
4932
4933 done_mask = ap->qc_active ^ qc_active;
4934
4935 if (unlikely(done_mask & qc_active)) {
4936 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4937 "(%08x->%08x)\n", ap->qc_active, qc_active);
4938 return -EINVAL;
4939 }
4940
4941 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4942 struct ata_queued_cmd *qc;
4943
4944 if (!(done_mask & (1 << i)))
4945 continue;
4946
4947 if ((qc = ata_qc_from_tag(ap, i))) {
4948 if (finish_qc)
4949 finish_qc(qc);
4950 ata_qc_complete(qc);
4951 nr_done++;
4952 }
4953 }
4954
4955 return nr_done;
4956}
4957
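/*
 * Worked example (illustrative): with ap->qc_active = 0x0007 (tags
 * 0-2 in flight) and the controller reporting qc_active = 0x0004,
 * done_mask = 0x0007 ^ 0x0004 = 0x0003, so tags 0 and 1 are
 * completed.  A bit set in done_mask & qc_active would mean a tag
 * went active without being issued, the illegal transition rejected
 * above.
 */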
1da177e4
LT
4958static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4959{
4960 struct ata_port *ap = qc->ap;
4961
4962 switch (qc->tf.protocol) {
3dc1d881 4963 case ATA_PROT_NCQ:
1da177e4
LT
4964 case ATA_PROT_DMA:
4965 case ATA_PROT_ATAPI_DMA:
4966 return 1;
4967
4968 case ATA_PROT_ATAPI:
4969 case ATA_PROT_PIO:
1da177e4
LT
4970 if (ap->flags & ATA_FLAG_PIO_DMA)
4971 return 1;
4972
4973 /* fall through */
4974
4975 default:
4976 return 0;
4977 }
4978
4979 /* never reached */
4980}
4981
4982/**
4983 * ata_qc_issue - issue taskfile to device
4984 * @qc: command to issue to device
4985 *
4986 * Prepare an ATA command for submission to the device.
4987 * This includes mapping the data into a DMA-able
4988 * area, filling in the S/G table, and finally
4989 * writing the taskfile to hardware, starting the command.
4990 *
4991 * LOCKING:
cca3974e 4992 * spin_lock_irqsave(host lock)
1da177e4 4993 */
8e0e694a 4994void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4995{
4996 struct ata_port *ap = qc->ap;
4997
dedaf2b0
TH
4998 /* Make sure only one non-NCQ command is outstanding. The
4999 * check is skipped for old EH because it reuses active qc to
5000 * request ATAPI sense.
5001 */
5002 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5003
5004 if (qc->tf.protocol == ATA_PROT_NCQ) {
5005 WARN_ON(ap->sactive & (1 << qc->tag));
5006 ap->sactive |= 1 << qc->tag;
5007 } else {
5008 WARN_ON(ap->sactive);
5009 ap->active_tag = qc->tag;
5010 }
5011
e4a70e76 5012 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5013 ap->qc_active |= 1 << qc->tag;
e4a70e76 5014
1da177e4
LT
5015 if (ata_should_dma_map(qc)) {
5016 if (qc->flags & ATA_QCFLAG_SG) {
5017 if (ata_sg_setup(qc))
8e436af9 5018 goto sg_err;
1da177e4
LT
5019 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5020 if (ata_sg_setup_one(qc))
8e436af9 5021 goto sg_err;
1da177e4
LT
5022 }
5023 } else {
5024 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5025 }
5026
5027 ap->ops->qc_prep(qc);
5028
8e0e694a
TH
5029 qc->err_mask |= ap->ops->qc_issue(qc);
5030 if (unlikely(qc->err_mask))
5031 goto err;
5032 return;
1da177e4 5033
8e436af9
TH
5034sg_err:
5035 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
5036 qc->err_mask |= AC_ERR_SYSTEM;
5037err:
5038 ata_qc_complete(qc);
1da177e4
LT
5039}
5040
5041/**
5042 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5043 * @qc: command to issue to device
5044 *
5045 * Using various libata functions and hooks, this function
5046 * starts an ATA command. ATA commands are grouped into
5047 * classes called "protocols", and issuing each type of protocol
5048 * is slightly different.
5049 *
0baab86b
EF
5050 * May be used as the qc_issue() entry in ata_port_operations.
5051 *
1da177e4 5052 * LOCKING:
cca3974e 5053 * spin_lock_irqsave(host lock)
1da177e4
LT
5054 *
5055 * RETURNS:
9a3d9eb0 5056 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5057 */
5058
9a3d9eb0 5059unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5060{
5061 struct ata_port *ap = qc->ap;
5062
e50362ec
AL
5063 /* Use polling pio if the LLD doesn't handle
5064 * interrupt driven pio and atapi CDB interrupt.
5065 */
5066 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5067 switch (qc->tf.protocol) {
5068 case ATA_PROT_PIO:
e3472cbe 5069 case ATA_PROT_NODATA:
e50362ec
AL
5070 case ATA_PROT_ATAPI:
5071 case ATA_PROT_ATAPI_NODATA:
5072 qc->tf.flags |= ATA_TFLAG_POLLING;
5073 break;
5074 case ATA_PROT_ATAPI_DMA:
5075 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5076 /* see ata_dma_blacklisted() */
e50362ec
AL
5077 BUG();
5078 break;
5079 default:
5080 break;
5081 }
5082 }
5083
3d3cca37
TH
5084 /* Some controllers show flaky interrupt behavior after
5085 * setting xfer mode. Use polling instead.
5086 */
5087 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
5088 qc->tf.feature == SETFEATURES_XFER) &&
5089 (ap->flags & ATA_FLAG_SETXFER_POLLING))
5090 qc->tf.flags |= ATA_TFLAG_POLLING;
5091
312f7da2 5092 /* select the device */
1da177e4
LT
5093 ata_dev_select(ap, qc->dev->devno, 1, 0);
5094
312f7da2 5095 /* start the command */
1da177e4
LT
5096 switch (qc->tf.protocol) {
5097 case ATA_PROT_NODATA:
312f7da2
AL
5098 if (qc->tf.flags & ATA_TFLAG_POLLING)
5099 ata_qc_set_polling(qc);
5100
e5338254 5101 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5102 ap->hsm_task_state = HSM_ST_LAST;
5103
5104 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5105 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5106
1da177e4
LT
5107 break;
5108
5109 case ATA_PROT_DMA:
587005de 5110 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5111
1da177e4
LT
5112 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5113 ap->ops->bmdma_setup(qc); /* set up bmdma */
5114 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5115 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5116 break;
5117
312f7da2
AL
5118 case ATA_PROT_PIO:
5119 if (qc->tf.flags & ATA_TFLAG_POLLING)
5120 ata_qc_set_polling(qc);
1da177e4 5121
e5338254 5122 ata_tf_to_host(ap, &qc->tf);
312f7da2 5123
54f00389
AL
5124 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5125 /* PIO data out protocol */
5126 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5127 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5128
5129 /* always send first data block using
e27486db 5130 * the ata_pio_task() codepath.
54f00389 5131 */
312f7da2 5132 } else {
54f00389
AL
5133 /* PIO data in protocol */
5134 ap->hsm_task_state = HSM_ST;
5135
5136 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5137 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5138
5139 /* if polling, ata_pio_task() handles the rest.
5140 * otherwise, interrupt handler takes over from here.
5141 */
312f7da2
AL
5142 }
5143
1da177e4
LT
5144 break;
5145
1da177e4 5146 case ATA_PROT_ATAPI:
1da177e4 5147 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5148 if (qc->tf.flags & ATA_TFLAG_POLLING)
5149 ata_qc_set_polling(qc);
5150
e5338254 5151 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5152
312f7da2
AL
5153 ap->hsm_task_state = HSM_ST_FIRST;
5154
5155 /* send cdb by polling if no cdb interrupt */
5156 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5157 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5158 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5159 break;
5160
5161 case ATA_PROT_ATAPI_DMA:
587005de 5162 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5163
1da177e4
LT
5164 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5165 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5166 ap->hsm_task_state = HSM_ST_FIRST;
5167
5168 /* send cdb by polling if no cdb interrupt */
5169 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5170 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5171 break;
5172
5173 default:
5174 WARN_ON(1);
9a3d9eb0 5175 return AC_ERR_SYSTEM;
1da177e4
LT
5176 }
5177
5178 return 0;
5179}
5180
1da177e4
LT
5181/**
5182 * ata_host_intr - Handle host interrupt for given (port, task)
5183 * @ap: Port on which interrupt arrived (possibly...)
5184 * @qc: Taskfile currently active in engine
5185 *
5186 * Handle host interrupt for given queued command. Currently,
5187 * only DMA interrupts are handled. All other commands are
5188 * handled via polling with interrupts disabled (nIEN bit).
5189 *
5190 * LOCKING:
cca3974e 5191 * spin_lock_irqsave(host lock)
1da177e4
LT
5192 *
5193 * RETURNS:
5194 * One if interrupt was handled, zero if not (shared irq).
5195 */
5196
5197inline unsigned int ata_host_intr (struct ata_port *ap,
5198 struct ata_queued_cmd *qc)
5199{
ea54763f 5200 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5201 u8 status, host_stat = 0;
1da177e4 5202
312f7da2 5203 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5204 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5205
312f7da2
AL
5206 /* Check whether we are expecting interrupt in this state */
5207 switch (ap->hsm_task_state) {
5208 case HSM_ST_FIRST:
6912ccd5
AL
5209 /* Some pre-ATAPI-4 devices assert INTRQ
5210 * at this state when ready to receive CDB.
5211 */
1da177e4 5212
312f7da2
AL
5213 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5214 * The flag was turned on only for atapi devices.
5215 * No need to check is_atapi_taskfile(&qc->tf) again.
5216 */
5217 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5218 goto idle_irq;
1da177e4 5219 break;
312f7da2
AL
5220 case HSM_ST_LAST:
5221 if (qc->tf.protocol == ATA_PROT_DMA ||
5222 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5223 /* check status of DMA engine */
5224 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5225 VPRINTK("ata%u: host_stat 0x%X\n",
5226 ap->print_id, host_stat);
312f7da2
AL
5227
5228 /* if it's not our irq... */
5229 if (!(host_stat & ATA_DMA_INTR))
5230 goto idle_irq;
5231
5232 /* before we do anything else, clear DMA-Start bit */
5233 ap->ops->bmdma_stop(qc);
a4f16610
AL
5234
5235 if (unlikely(host_stat & ATA_DMA_ERR)) {
5236 /* error when transferring data to/from memory */
5237 qc->err_mask |= AC_ERR_HOST_BUS;
5238 ap->hsm_task_state = HSM_ST_ERR;
5239 }
312f7da2
AL
5240 }
5241 break;
5242 case HSM_ST:
5243 break;
1da177e4
LT
5244 default:
5245 goto idle_irq;
5246 }
5247
312f7da2
AL
5248 /* check altstatus */
5249 status = ata_altstatus(ap);
5250 if (status & ATA_BUSY)
5251 goto idle_irq;
1da177e4 5252
312f7da2
AL
5253 /* check main status, clearing INTRQ */
5254 status = ata_chk_status(ap);
5255 if (unlikely(status & ATA_BUSY))
5256 goto idle_irq;
1da177e4 5257
312f7da2
AL
5258 /* ack bmdma irq events */
5259 ap->ops->irq_clear(ap);
1da177e4 5260
bb5cb290 5261 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5262
5263 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5264 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5265 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5266
1da177e4
LT
5267 return 1; /* irq handled */
5268
5269idle_irq:
5270 ap->stats.idle_irq++;
5271
5272#ifdef ATA_IRQ_TRAP
5273 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5274 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5275 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5276 return 1;
1da177e4
LT
5277 }
5278#endif
5279 return 0; /* irq not handled */
5280}
5281
5282/**
5283 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5284 * @irq: irq line (unused)
cca3974e 5285 * @dev_instance: pointer to our ata_host information structure
1da177e4 5286 *
0cba632b
JG
5287 * Default interrupt handler for PCI IDE devices. Calls
5288 * ata_host_intr() for each port that is not disabled.
5289 *
1da177e4 5290 * LOCKING:
cca3974e 5291 * Obtains host lock during operation.
1da177e4
LT
5292 *
5293 * RETURNS:
0cba632b 5294 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5295 */
5296
7d12e780 5297irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5298{
cca3974e 5299 struct ata_host *host = dev_instance;
1da177e4
LT
5300 unsigned int i;
5301 unsigned int handled = 0;
5302 unsigned long flags;
5303
5304 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5305 spin_lock_irqsave(&host->lock, flags);
1da177e4 5306
cca3974e 5307 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5308 struct ata_port *ap;
5309
cca3974e 5310 ap = host->ports[i];
c1389503 5311 if (ap &&
029f5468 5312 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5313 struct ata_queued_cmd *qc;
5314
5315 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5316 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5317 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5318 handled |= ata_host_intr(ap, qc);
5319 }
5320 }
5321
cca3974e 5322 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5323
5324 return IRQ_RETVAL(handled);
5325}
5326
34bf2170
TH
5327/**
5328 * sata_scr_valid - test whether SCRs are accessible
5329 * @ap: ATA port to test SCR accessibility for
5330 *
5331 * Test whether SCRs are accessible for @ap.
5332 *
5333 * LOCKING:
5334 * None.
5335 *
5336 * RETURNS:
5337 * 1 if SCRs are accessible, 0 otherwise.
5338 */
5339int sata_scr_valid(struct ata_port *ap)
5340{
5341 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5342}
5343
5344/**
5345 * sata_scr_read - read SCR register of the specified port
5346 * @ap: ATA port to read SCR for
5347 * @reg: SCR to read
5348 * @val: Place to store read value
5349 *
5350 * Read SCR register @reg of @ap into *@val. This function is
5351 * guaranteed to succeed if the cable type of the port is SATA
5352 * and the port implements ->scr_read.
5353 *
5354 * LOCKING:
5355 * None.
5356 *
5357 * RETURNS:
5358 * 0 on success, negative errno on failure.
5359 */
5360int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5361{
5362 if (sata_scr_valid(ap)) {
5363 *val = ap->ops->scr_read(ap, reg);
5364 return 0;
5365 }
5366 return -EOPNOTSUPP;
5367}
5368
5369/**
5370 * sata_scr_write - write SCR register of the specified port
5371 * @ap: ATA port to write SCR for
5372 * @reg: SCR to write
5373 * @val: value to write
5374 *
5375 * Write @val to SCR register @reg of @ap. This function is
5376 * guaranteed to succeed if the cable type of the port is SATA
5377 * and the port implements ->scr_write.
5378 *
5379 * LOCKING:
5380 * None.
5381 *
5382 * RETURNS:
5383 * 0 on success, negative errno on failure.
5384 */
5385int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5386{
5387 if (sata_scr_valid(ap)) {
5388 ap->ops->scr_write(ap, reg, val);
5389 return 0;
5390 }
5391 return -EOPNOTSUPP;
5392}
5393
5394/**
5395 * sata_scr_write_flush - write SCR register of the specified port and flush
5396 * @ap: ATA port to write SCR for
5397 * @reg: SCR to write
5398 * @val: value to write
5399 *
5400 * This function is identical to sata_scr_write() except that this
5401 * function performs flush after writing to the register.
5402 *
5403 * LOCKING:
5404 * None.
5405 *
5406 * RETURNS:
5407 * 0 on success, negative errno on failure.
5408 */
5409int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5410{
5411 if (sata_scr_valid(ap)) {
5412 ap->ops->scr_write(ap, reg, val);
5413 ap->ops->scr_read(ap, reg);
5414 return 0;
5415 }
5416 return -EOPNOTSUPP;
5417}
5418
5419/**
5420 * ata_port_online - test whether the given port is online
5421 * @ap: ATA port to test
5422 *
5423 * Test whether @ap is online. Note that this function returns 0
5424 * if online status of @ap cannot be obtained, so
5425 * ata_port_online(ap) != !ata_port_offline(ap).
5426 *
5427 * LOCKING:
5428 * None.
5429 *
5430 * RETURNS:
5431 * 1 if the port online status is available and online.
5432 */
5433int ata_port_online(struct ata_port *ap)
5434{
5435 u32 sstatus;
5436
5437 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5438 return 1;
5439 return 0;
5440}
5441
5442/**
5443 * ata_port_offline - test whether the given port is offline
5444 * @ap: ATA port to test
5445 *
5446 * Test whether @ap is offline. Note that this function returns
5447 * 0 if offline status of @ap cannot be obtained, so
5448 * ata_port_online(ap) != !ata_port_offline(ap).
5449 *
5450 * LOCKING:
5451 * None.
5452 *
5453 * RETURNS:
5454 * 1 if the port offline status is available and offline.
5455 */
5456int ata_port_offline(struct ata_port *ap)
5457{
5458 u32 sstatus;
5459
5460 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5461 return 1;
5462 return 0;
5463}
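/*
 * Illustrative sketch: because both helpers return 0 when link
 * status cannot be read, "not offline" rather than "online" is the
 * safe test before assuming a device may be present.  Hypothetical
 * helper.
 */
static int my_may_have_device(struct ata_port *ap)
{
	/* probe unless the link is positively known to be down */
	return !ata_port_offline(ap);
}
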
0baab86b 5464
77b08fb5 5465int ata_flush_cache(struct ata_device *dev)
9b847548 5466{
977e6b9f 5467 unsigned int err_mask;
9b847548
JA
5468 u8 cmd;
5469
5470 if (!ata_try_flush_cache(dev))
5471 return 0;
5472
6fc49adb 5473 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5474 cmd = ATA_CMD_FLUSH_EXT;
5475 else
5476 cmd = ATA_CMD_FLUSH;
5477
977e6b9f
TH
5478 err_mask = ata_do_simple_cmd(dev, cmd);
5479 if (err_mask) {
5480 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5481 return -EIO;
5482 }
5483
5484 return 0;
9b847548
JA
5485}
5486
6ffa01d8 5487#ifdef CONFIG_PM
cca3974e
JG
5488static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5489 unsigned int action, unsigned int ehi_flags,
5490 int wait)
500530f6
TH
5491{
5492 unsigned long flags;
5493 int i, rc;
5494
cca3974e
JG
5495 for (i = 0; i < host->n_ports; i++) {
5496 struct ata_port *ap = host->ports[i];
500530f6
TH
5497
5498 /* Previous resume operation might still be in
5499 * progress. Wait for PM_PENDING to clear.
5500 */
5501 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5502 ata_port_wait_eh(ap);
5503 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5504 }
5505
5506 /* request PM ops to EH */
5507 spin_lock_irqsave(ap->lock, flags);
5508
5509 ap->pm_mesg = mesg;
5510 if (wait) {
5511 rc = 0;
5512 ap->pm_result = &rc;
5513 }
5514
5515 ap->pflags |= ATA_PFLAG_PM_PENDING;
5516 ap->eh_info.action |= action;
5517 ap->eh_info.flags |= ehi_flags;
5518
5519 ata_port_schedule_eh(ap);
5520
5521 spin_unlock_irqrestore(ap->lock, flags);
5522
5523 /* wait and check result */
5524 if (wait) {
5525 ata_port_wait_eh(ap);
5526 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5527 if (rc)
5528 return rc;
5529 }
5530 }
5531
5532 return 0;
5533}
5534
5535/**
cca3974e
JG
5536 * ata_host_suspend - suspend host
5537 * @host: host to suspend
500530f6
TH
5538 * @mesg: PM message
5539 *
cca3974e 5540 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5541 * function requests EH to perform PM operations and waits for EH
5542 * to finish.
5543 *
5544 * LOCKING:
5545 * Kernel thread context (may sleep).
5546 *
5547 * RETURNS:
5548 * 0 on success, -errno on failure.
5549 */
cca3974e 5550int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6
TH
5551{
5552 int i, j, rc;
5553
cca3974e 5554 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
500530f6
TH
5555 if (rc)
5556 goto fail;
5557
5558 /* EH is quiescent now. Fail if we have any ready device.
5559 * This happens if hotplug occurs between completion of device
5560 * suspension and here.
5561 */
cca3974e
JG
5562 for (i = 0; i < host->n_ports; i++) {
5563 struct ata_port *ap = host->ports[i];
500530f6
TH
5564
5565 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5566 struct ata_device *dev = &ap->device[j];
5567
5568 if (ata_dev_ready(dev)) {
5569 ata_port_printk(ap, KERN_WARNING,
5570 "suspend failed, device %d "
5571 "still active\n", dev->devno);
5572 rc = -EBUSY;
5573 goto fail;
5574 }
5575 }
5576 }
5577
cca3974e 5578 host->dev->power.power_state = mesg;
500530f6
TH
5579 return 0;
5580
5581 fail:
cca3974e 5582 ata_host_resume(host);
500530f6
TH
5583 return rc;
5584}
5585
5586/**
cca3974e
JG
5587 * ata_host_resume - resume host
5588 * @host: host to resume
500530f6 5589 *
cca3974e 5590 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5591 * function requests EH to perform PM operations and returns.
5592 * Note that all resume operations are performed in parallel.
5593 *
5594 * LOCKING:
5595 * Kernel thread context (may sleep).
5596 */
cca3974e 5597void ata_host_resume(struct ata_host *host)
500530f6 5598{
cca3974e
JG
5599 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5600 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5601 host->dev->power.power_state = PMSG_ON;
500530f6 5602}
6ffa01d8 5603#endif
500530f6 5604
c893a3ae
RD
5605/**
5606 * ata_port_start - Set port up for dma.
5607 * @ap: Port to initialize
5608 *
5609 * Called just after data structures for each port are
5610 * initialized. Allocates space for PRD table.
5611 *
5612 * May be used as the port_start() entry in ata_port_operations.
5613 *
5614 * LOCKING:
5615 * Inherited from caller.
5616 */
f0d36efd 5617int ata_port_start(struct ata_port *ap)
1da177e4 5618{
2f1f610b 5619 struct device *dev = ap->dev;
6037d6bb 5620 int rc;
1da177e4 5621
f0d36efd
TH
5622 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5623 GFP_KERNEL);
1da177e4
LT
5624 if (!ap->prd)
5625 return -ENOMEM;
5626
6037d6bb 5627 rc = ata_pad_alloc(ap, dev);
f0d36efd 5628 if (rc)
6037d6bb 5629 return rc;
1da177e4 5630
f0d36efd
TH
5631 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5632 (unsigned long long)ap->prd_dma);
1da177e4
LT
5633 return 0;
5634}
5635
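/*
 * Illustrative sketch: an LLD that needs private per-port state can
 * wrap ata_port_start() so the common PRD allocation still happens.
 * "struct my_port_priv" and "my_port_start" are hypothetical.
 */
struct my_port_priv {
	u32	saved_ctl;
};

static int my_port_start(struct ata_port *ap)
{
	struct my_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	ap->private_data = pp;
	return 0;
}
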
3ef3b43d
TH
5636/**
5637 * ata_dev_init - Initialize an ata_device structure
5638 * @dev: Device structure to initialize
5639 *
5640 * Initialize @dev in preparation for probing.
5641 *
5642 * LOCKING:
5643 * Inherited from caller.
5644 */
5645void ata_dev_init(struct ata_device *dev)
5646{
5647 struct ata_port *ap = dev->ap;
72fa4b74
TH
5648 unsigned long flags;
5649
5a04bf4b
TH
5650 /* SATA spd limit is bound to the first device */
5651 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5652
72fa4b74
TH
5653 /* High bits of dev->flags are used to record warm plug
5654 * requests which occur asynchronously. Synchronize using
cca3974e 5655 * host lock.
72fa4b74 5656 */
ba6a1308 5657 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5658 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5659 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5660
72fa4b74
TH
5661 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5662 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5663 dev->pio_mask = UINT_MAX;
5664 dev->mwdma_mask = UINT_MAX;
5665 dev->udma_mask = UINT_MAX;
5666}
5667
1da177e4 5668/**
f3187195
TH
5669 * ata_port_alloc - allocate and initialize basic ATA port resources
5670 * @host: ATA host this allocated port belongs to
1da177e4 5671 *
f3187195
TH
5672 * Allocate and initialize basic ATA port resources.
5673 *
5674 * RETURNS:
5675 * Allocated ATA port on success, NULL on failure.
0cba632b 5676 *
1da177e4 5677 * LOCKING:
f3187195 5678 * Inherited from calling layer (may sleep).
1da177e4 5679 */
f3187195 5680struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 5681{
f3187195 5682 struct ata_port *ap;
1da177e4
LT
5683 unsigned int i;
5684
f3187195
TH
5685 DPRINTK("ENTER\n");
5686
5687 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5688 if (!ap)
5689 return NULL;
5690
cca3974e 5691 ap->lock = &host->lock;
198e0fed 5692 ap->flags = ATA_FLAG_DISABLED;
f3187195 5693 ap->print_id = -1;
1da177e4 5694 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5695 ap->host = host;
f3187195
TH
5696 ap->dev = host->dev;
5697
5a04bf4b 5698 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4
LT
5699 ap->active_tag = ATA_TAG_POISON;
5700 ap->last_ctl = 0xFF;
bd5d825c
BP
5701
5702#if defined(ATA_VERBOSE_DEBUG)
5703 /* turn on all debugging levels */
5704 ap->msg_enable = 0x00FF;
5705#elif defined(ATA_DEBUG)
5706 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5707#else
0dd4b21f 5708 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5709#endif
1da177e4 5710
65f27f38
DH
5711 INIT_DELAYED_WORK(&ap->port_task, NULL);
5712 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5713 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5714 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5715 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 5716
838df628 5717 ap->cbl = ATA_CBL_NONE;
838df628 5718
acf356b1
TH
5719 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5720 struct ata_device *dev = &ap->device[i];
38d87234 5721 dev->ap = ap;
72fa4b74 5722 dev->devno = i;
3ef3b43d 5723 ata_dev_init(dev);
acf356b1 5724 }
1da177e4
LT
5725
5726#ifdef ATA_IRQ_TRAP
5727 ap->stats.unhandled_irq = 1;
5728 ap->stats.idle_irq = 1;
5729#endif
1da177e4 5730 return ap;
1da177e4
LT
5731}
5732
f0d36efd
TH
5733static void ata_host_release(struct device *gendev, void *res)
5734{
5735 struct ata_host *host = dev_get_drvdata(gendev);
5736 int i;
5737
5738 for (i = 0; i < host->n_ports; i++) {
5739 struct ata_port *ap = host->ports[i];
5740
ecef7253
TH
5741 if (!ap)
5742 continue;
5743
5744 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
f0d36efd 5745 ap->ops->port_stop(ap);
f0d36efd
TH
5746 }
5747
ecef7253 5748 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
f0d36efd 5749 host->ops->host_stop(host);
1aa56cca 5750
1aa506e4
TH
5751 for (i = 0; i < host->n_ports; i++) {
5752 struct ata_port *ap = host->ports[i];
5753
4911487a
TH
5754 if (!ap)
5755 continue;
5756
5757 if (ap->scsi_host)
1aa506e4
TH
5758 scsi_host_put(ap->scsi_host);
5759
4911487a 5760 kfree(ap);
1aa506e4
TH
5761 host->ports[i] = NULL;
5762 }
5763
1aa56cca 5764 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
5765}
5766
f3187195
TH
5767/**
5768 * ata_host_alloc - allocate and init basic ATA host resources
5769 * @dev: generic device this host is associated with
5770 * @max_ports: maximum number of ATA ports associated with this host
5771 *
5772 * Allocate and initialize basic ATA host resources. LLD calls
5773 * this function to allocate a host, then initializes it fully and
5774 * attaches it using ata_host_register().
5775 *
5776 * @max_ports ports are allocated and host->n_ports is
5777 * initialized to @max_ports. The caller is allowed to decrease
5778 * host->n_ports before calling ata_host_register(). The unused
5779 * ports will be automatically freed on registration.
5780 *
5781 * RETURNS:
5782 * Allocated ATA host on success, NULL on failure.
5783 *
5784 * LOCKING:
5785 * Inherited from calling layer (may sleep).
5786 */
5787struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5788{
5789 struct ata_host *host;
5790 size_t sz;
5791 int i;
5792
5793 DPRINTK("ENTER\n");
5794
5795 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5796 return NULL;
5797
5798 /* alloc a container for our list of ATA ports (buses) */
5799 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5801 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5802 if (!host)
5803 goto err_out;
5804
5805 devres_add(dev, host);
5806 dev_set_drvdata(dev, host);
5807
5808 spin_lock_init(&host->lock);
5809 host->dev = dev;
5810 host->n_ports = max_ports;
5811
5812 /* allocate ports bound to this host */
5813 for (i = 0; i < max_ports; i++) {
5814 struct ata_port *ap;
5815
5816 ap = ata_port_alloc(host);
5817 if (!ap)
5818 goto err_out;
5819
5820 ap->port_no = i;
5821 host->ports[i] = ap;
5822 }
5823
5824 devres_remove_group(dev, NULL);
5825 return host;
5826
5827 err_out:
5828 devres_release_group(dev, NULL);
5829 return NULL;
5830}
5831
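/*
 * Illustrative sketch of the flow this interface enables: the LLD
 * allocates the host, fills in each port, starts it, grabs its IRQ
 * and registers.  All "my_" names (ops, sht, register layout) are
 * hypothetical.
 */
extern const struct ata_port_operations my_port_ops;	/* hypothetical */
extern struct scsi_host_template my_sht;		/* hypothetical */

static int my_lld_init_host(struct device *dev, void __iomem *mmio,
			    unsigned int irq)
{
	struct ata_host *host;
	int i, rc;

	host = ata_host_alloc(dev, 2);
	if (!host)
		return -ENOMEM;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ap->ops = &my_port_ops;
		ap->pio_mask = 0x1f;		/* PIO0-4 */
		ap->flags |= ATA_FLAG_SATA;
		ap->ioaddr.cmd_addr = mmio + 0x100 * i;
	}

	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(dev, irq, ata_interrupt, IRQF_SHARED,
			      "my_lld", host);
	if (rc)
		return rc;

	return ata_host_register(host, &my_sht);
}
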
ecef7253
TH
5832/**
5833 * ata_host_start - start and freeze ports of an ATA host
5834 * @host: ATA host to start ports for
5835 *
5836 * Start and then freeze ports of @host. Started status is
5837 * recorded in host->flags, so this function can be called
5838 * multiple times. Ports are guaranteed to get started only
f3187195
TH
5839 * once. If host->ops isn't initialized yet, it's set to the
5840 * first non-dummy port ops.
ecef7253
TH
5841 *
5842 * LOCKING:
5843 * Inherited from calling layer (may sleep).
5844 *
5845 * RETURNS:
5846 * 0 if all ports are started successfully, -errno otherwise.
5847 */
5848int ata_host_start(struct ata_host *host)
5849{
5850 int i, rc;
5851
5852 if (host->flags & ATA_HOST_STARTED)
5853 return 0;
5854
5855 for (i = 0; i < host->n_ports; i++) {
5856 struct ata_port *ap = host->ports[i];
5857
f3187195
TH
5858 if (!host->ops && !ata_port_is_dummy(ap))
5859 host->ops = ap->ops;
5860
ecef7253
TH
5861 if (ap->ops->port_start) {
5862 rc = ap->ops->port_start(ap);
5863 if (rc) {
5864 ata_port_printk(ap, KERN_ERR, "failed to "
5865 "start port (errno=%d)\n", rc);
5866 goto err_out;
5867 }
5868 }
5869
5870 ata_eh_freeze_port(ap);
5871 }
5872
5873 host->flags |= ATA_HOST_STARTED;
5874 return 0;
5875
5876 err_out:
5877 while (--i >= 0) {
5878 struct ata_port *ap = host->ports[i];
5879
5880 if (ap->ops->port_stop)
5881 ap->ops->port_stop(ap);
5882 }
5883 return rc;
5884}
5885
b03732f0 5886/**
cca3974e
JG
5887 * ata_sas_host_init - Initialize a host struct
5888 * @host: host to initialize
5889 * @dev: device host is attached to
5890 * @flags: host flags
5891 * @ops: port_ops
b03732f0
BK
5892 *
5893 * LOCKING:
5894 * PCI/etc. bus probe sem.
5895 *
5896 */
f3187195 5897/* KILLME - the only user left is ipr */
cca3974e
JG
5898void ata_host_init(struct ata_host *host, struct device *dev,
5899 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 5900{
cca3974e
JG
5901 spin_lock_init(&host->lock);
5902 host->dev = dev;
5903 host->flags = flags;
5904 host->ops = ops;
b03732f0
BK
5905}
5906
f3187195
TH
5907/**
5908 * ata_host_register - register initialized ATA host
5909 * @host: ATA host to register
5910 * @sht: template for SCSI host
5911 *
5912 * Register initialized ATA host. @host is allocated using
5913 * ata_host_alloc() and fully initialized by LLD. This function
5914 * starts ports, registers @host with ATA and SCSI layers and
5915 * probes attached devices.
5916 *
5917 * LOCKING:
5918 * Inherited from calling layer (may sleep).
5919 *
5920 * RETURNS:
5921 * 0 on success, -errno otherwise.
5922 */
5923int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5924{
5925 int i, rc;
5926
5927 /* host must have been started */
5928 if (!(host->flags & ATA_HOST_STARTED)) {
5929 dev_printk(KERN_ERR, host->dev,
5930 "BUG: trying to register unstarted host\n");
5931 WARN_ON(1);
5932 return -EINVAL;
5933 }
5934
5935 /* Blow away unused ports. This happens when LLD can't
5936 * determine the exact number of ports to allocate at
5937 * allocation time.
5938 */
5939 for (i = host->n_ports; host->ports[i]; i++)
5940 kfree(host->ports[i]);
5941
5942 /* give ports names and add SCSI hosts */
5943 for (i = 0; i < host->n_ports; i++)
5944 host->ports[i]->print_id = ata_print_id++;
5945
5946 rc = ata_scsi_add_hosts(host, sht);
5947 if (rc)
5948 return rc;
5949
5950 /* set cable, sata_spd_limit and report */
5951 for (i = 0; i < host->n_ports; i++) {
5952 struct ata_port *ap = host->ports[i];
5953 int irq_line;
5954 u32 scontrol;
5955 unsigned long xfer_mask;
5956
5957 /* set SATA cable type if still unset */
5958 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5959 ap->cbl = ATA_CBL_SATA;
5960
5961 /* init sata_spd_limit to the current value */
5962 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5963 int spd = (scontrol >> 4) & 0xf;
5964 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5965 }
5966 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5967
5968 /* report the secondary IRQ for second channel legacy */
5969 irq_line = host->irq;
5970 if (i == 1 && host->irq2)
5971 irq_line = host->irq2;
5972
5973 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5974 ap->udma_mask);
5975
5976 /* print per-port info to dmesg */
5977 if (!ata_port_is_dummy(ap))
5978 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5979 "ctl 0x%p bmdma 0x%p irq %d\n",
5980 ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
5981 ata_mode_string(xfer_mask),
5982 ap->ioaddr.cmd_addr,
5983 ap->ioaddr.ctl_addr,
5984 ap->ioaddr.bmdma_addr,
5985 irq_line);
5986 else
5987 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5988 }
5989
5990 /* perform each probe synchronously */
5991 DPRINTK("probe begin\n");
5992 for (i = 0; i < host->n_ports; i++) {
5993 struct ata_port *ap = host->ports[i];
5994 int rc;
5995
5996 /* probe */
5997 if (ap->ops->error_handler) {
5998 struct ata_eh_info *ehi = &ap->eh_info;
5999 unsigned long flags;
6000
6001 ata_port_probe(ap);
6002
6003 /* kick EH for boot probing */
6004 spin_lock_irqsave(ap->lock, flags);
6005
6006 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6007 ehi->action |= ATA_EH_SOFTRESET;
6008 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6009
6010 ap->pflags |= ATA_PFLAG_LOADING;
6011 ata_port_schedule_eh(ap);
6012
6013 spin_unlock_irqrestore(ap->lock, flags);
6014
6015 /* wait for EH to finish */
6016 ata_port_wait_eh(ap);
6017 } else {
6018 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6019 rc = ata_bus_probe(ap);
6020 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6021
6022 if (rc) {
6023 /* FIXME: do something useful here?
6024 * Current libata behavior will
6025 * tear down everything when
6026 * the module is removed
6027 * or the h/w is unplugged.
6028 */
6029 }
6030 }
6031 }
6032
6033 /* probes are done, now scan each port's disk(s) */
6034 DPRINTK("host probe begin\n");
6035 for (i = 0; i < host->n_ports; i++) {
6036 struct ata_port *ap = host->ports[i];
6037
6038 ata_scsi_scan_host(ap);
6039 }
6040
6041 return 0;
6042}
6043
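/*
 * Illustrative sketch: an LLD that allocated for the maximum port
 * count can shrink host->n_ports once probing reveals how many
 * ports really exist; the loop above frees the surplus.
 * "my_register_detected" is hypothetical.
 */
static int my_register_detected(struct ata_host *host,
				struct scsi_host_template *sht,
				int n_detected)
{
	if (n_detected < host->n_ports)
		host->n_ports = n_detected;
	return ata_host_register(host, sht);
}
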
1da177e4 6044/**
0cba632b
JG
6045 * ata_device_add - Register hardware device with ATA and SCSI layers
6046 * @ent: Probe information describing hardware device to be registered
6047 *
6048 * This function processes the information provided in the probe
6049 * information struct @ent, allocates the necessary ATA and SCSI
6050 * host information structures, initializes them, and registers
6051 * everything with requisite kernel subsystems.
6052 *
6053 * This function requests irqs, probes the ATA bus, and probes
6054 * the SCSI bus.
1da177e4
LT
6055 *
6056 * LOCKING:
0cba632b 6057 * PCI/etc. bus probe sem.
1da177e4
LT
6058 *
6059 * RETURNS:
0cba632b 6060 * Number of ports registered. Zero on error (no ports registered).
1da177e4 6061 */
057ace5e 6062int ata_device_add(const struct ata_probe_ent *ent)
1da177e4 6063{
6d0500df 6064 unsigned int i;
1da177e4 6065 struct device *dev = ent->dev;
cca3974e 6066 struct ata_host *host;
39b07ce6 6067 int rc;
1da177e4
LT
6068
6069 DPRINTK("ENTER\n");
f20b16ff 6070
02f076aa
AC
6071 if (ent->irq == 0) {
6072 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
6073 return 0;
6074 }
f0d36efd 6075
f3187195
TH
6076 if (!ent->port_ops->error_handler &&
6077 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
6078 dev_printk(KERN_ERR, dev, "no reset mechanism available\n");
6079 return 0;
6080 }
6081
f0d36efd
TH
6082 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
6083 return 0;
6084
f3187195
TH
6085 /* allocate host */
6086 host = ata_host_alloc(dev, ent->n_ports);
 if (!host)
 goto err_out;
1da177e4 6087
cca3974e
JG
6088 host->irq = ent->irq;
6089 host->irq2 = ent->irq2;
0d5ff566 6090 host->iomap = ent->iomap;
cca3974e 6091 host->private_data = ent->private_data;
f3187195
TH
6092 host->ops = ent->port_ops;
6093 host->flags = ent->_host_flags;
1da177e4 6094
cca3974e 6095 for (i = 0; i < host->n_ports; i++) {
f3187195 6096 struct ata_port *ap = host->ports[i];
1da177e4 6097
dd5b06c4
TH
6098 /* dummy? */
6099 if (ent->dummy_port_mask & (1 << i)) {
dd5b06c4
TH
6100 ap->ops = &ata_dummy_port_ops;
6101 continue;
6102 }
6103
f3187195
TH
6104 if (ap->port_no == 1 && ent->pinfo2) {
6105 ap->pio_mask = ent->pinfo2->pio_mask;
6106 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
6107 ap->udma_mask = ent->pinfo2->udma_mask;
6108 ap->flags |= ent->pinfo2->flags;
6109 ap->ops = ent->pinfo2->port_ops;
6110 } else {
6111 ap->pio_mask = ent->pio_mask;
6112 ap->mwdma_mask = ent->mwdma_mask;
6113 ap->udma_mask = ent->udma_mask;
6114 ap->flags |= ent->port_flags;
6115 ap->ops = ent->port_ops;
6116 }
1da177e4 6117
f3187195
TH
6118 memcpy(&ap->ioaddr, &ent->port[ap->port_no],
6119 sizeof(struct ata_ioports));
1da177e4
LT
6120 }
6121
f3187195 6122 /* start and freeze ports before requesting IRQ */
ecef7253
TH
6123 rc = ata_host_start(host);
6124 if (rc)
6125 goto err_out;
6126
2ec7df04 6127 /* obtain irq, that may be shared between channels */
f0d36efd
TH
6128 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
6129 ent->irq_flags, DRV_NAME, host);
39b07ce6
JG
6130 if (rc) {
6131 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
6132 ent->irq, rc);
1da177e4 6133 goto err_out;
39b07ce6 6134 }
1da177e4 6135
2ec7df04
AC
6136 /* do we have a second IRQ for the other channel, e.g. legacy mode */
6137 if (ent->irq2) {
6138 /* We will get weird core code crashes later if this is true
6139 so trap it now */
6140 BUG_ON(ent->irq == ent->irq2);
6141
f0d36efd
TH
6142 rc = devm_request_irq(dev, ent->irq2,
6143 ent->port_ops->irq_handler, ent->irq_flags,
6144 DRV_NAME, host);
2ec7df04
AC
6145 if (rc) {
6146 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
6147 ent->irq2, rc);
f0d36efd 6148 goto err_out;
2ec7df04
AC
6149 }
6150 }
6151
f0d36efd 6152 /* resource acquisition complete */
b878ca5d 6153 devres_remove_group(dev, ata_device_add);
f0d36efd 6154
f3187195
TH
6155 /* register */
6156 rc = ata_host_register(host, ent->sht);
6157 if (rc)
6158 goto err_out;
1da177e4 6159
f3187195
TH
6160 VPRINTK("EXIT, returning %u\n", host->n_ports);
6161 return host->n_ports; /* success */
1da177e4 6162
f0d36efd
TH
6163 err_out:
6164 devres_release_group(dev, ata_device_add);
f3187195 6165 VPRINTK("EXIT, returning 0\n");
1da177e4
LT
6166 return 0;
6167}
6168
720ba126
TH
6169/**
6170 * ata_port_detach - Detach ATA port in preparation for device removal
6171 * @ap: ATA port to be detached
6172 *
6173 * Detach all ATA devices and the associated SCSI devices of @ap;
6174 * then, remove the associated SCSI host. @ap is guaranteed to
6175 * be quiescent on return from this function.
6176 *
6177 * LOCKING:
6178 * Kernel thread context (may sleep).
6179 */
6180void ata_port_detach(struct ata_port *ap)
6181{
6182 unsigned long flags;
6183 int i;
6184
6185 if (!ap->ops->error_handler)
c3cf30a9 6186 goto skip_eh;
720ba126
TH
6187
6188 /* tell EH we're leaving & flush EH */
ba6a1308 6189 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 6190 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 6191 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6192
6193 ata_port_wait_eh(ap);
6194
6195 /* EH is now guaranteed to see UNLOADING, so no new device
6196 * will be attached. Disable all existing devices.
6197 */
ba6a1308 6198 spin_lock_irqsave(ap->lock, flags);
720ba126
TH
6199
6200 for (i = 0; i < ATA_MAX_DEVICES; i++)
6201 ata_dev_disable(&ap->device[i]);
6202
ba6a1308 6203 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6204
6205 /* Final freeze & EH. All in-flight commands are aborted. EH
6206 * will be skipped and retries will be terminated with bad
6207 * target.
6208 */
ba6a1308 6209 spin_lock_irqsave(ap->lock, flags);
720ba126 6210 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 6211 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6212
6213 ata_port_wait_eh(ap);
6214
6215 /* Flush hotplug task. The sequence is similar to
6216 * ata_port_flush_task().
6217 */
6218 flush_workqueue(ata_aux_wq);
6219 cancel_delayed_work(&ap->hotplug_task);
6220 flush_workqueue(ata_aux_wq);
6221
c3cf30a9 6222 skip_eh:
720ba126 6223 /* remove the associated SCSI host */
cca3974e 6224 scsi_remove_host(ap->scsi_host);
720ba126
TH
6225}
6226
0529c159
TH
6227/**
6228 * ata_host_detach - Detach all ports of an ATA host
6229 * @host: Host to detach
6230 *
6231 * Detach all ports of @host.
6232 *
6233 * LOCKING:
6234 * Kernel thread context (may sleep).
6235 */
6236void ata_host_detach(struct ata_host *host)
6237{
6238 int i;
6239
6240 for (i = 0; i < host->n_ports; i++)
6241 ata_port_detach(host->ports[i]);
6242}
6243
f6d950e2
BK
6244struct ata_probe_ent *
6245ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
6246{
6247 struct ata_probe_ent *probe_ent;
6248
4d05447e 6249 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
f6d950e2
BK
6250 if (!probe_ent) {
6251 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
6252 kobject_name(&(dev->kobj)));
6253 return NULL;
6254 }
6255
6256 INIT_LIST_HEAD(&probe_ent->node);
6257 probe_ent->dev = dev;
6258
6259 probe_ent->sht = port->sht;
cca3974e 6260 probe_ent->port_flags = port->flags;
f6d950e2
BK
6261 probe_ent->pio_mask = port->pio_mask;
6262 probe_ent->mwdma_mask = port->mwdma_mask;
6263 probe_ent->udma_mask = port->udma_mask;
6264 probe_ent->port_ops = port->port_ops;
d639ca94 6265 probe_ent->private_data = port->private_data;
f6d950e2
BK
6266
6267 return probe_ent;
6268}
6269
1da177e4
LT
6270/**
6271 * ata_std_ports - initialize ioaddr with standard port offsets.
6272 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6273 *
6274 * Utility function which initializes data_addr, error_addr,
6275 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6276 * device_addr, status_addr, and command_addr to standard offsets
6277 * relative to cmd_addr.
6278 *
6279 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6280 */
0baab86b 6281
1da177e4
LT
6282void ata_std_ports(struct ata_ioports *ioaddr)
6283{
6284 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6285 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6286 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6287 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6288 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6289 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6290 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6291 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6292 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6293 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6294}
6295
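/*
 * Illustrative sketch: fill in cmd_addr and ctl_addr for a channel,
 * then let ata_std_ports() derive the remaining taskfile registers.
 * "my_setup_ioaddr" is hypothetical.
 */
static void my_setup_ioaddr(struct ata_port *ap, void __iomem *cmd,
			    void __iomem *ctl)
{
	ap->ioaddr.cmd_addr = cmd;
	ap->ioaddr.altstatus_addr = ctl;
	ap->ioaddr.ctl_addr = ctl;
	ata_std_ports(&ap->ioaddr);
}
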
0baab86b 6296
374b1873
JG
6297#ifdef CONFIG_PCI
6298
1da177e4
LT
6299/**
6300 * ata_pci_remove_one - PCI layer callback for device removal
6301 * @pdev: PCI device that was removed
6302 *
b878ca5d
TH
6303 * PCI layer indicates to libata via this hook that hot-unplug or
6304 * module unload event has occurred. Detach all ports. Resource
6305 * release is handled via devres.
1da177e4
LT
6306 *
6307 * LOCKING:
6308 * Inherited from PCI layer (may sleep).
6309 */
f0d36efd 6310void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
6311{
6312 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6313 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6314
b878ca5d 6315 ata_host_detach(host);
1da177e4
LT
6316}
6317
6318/* move to PCI subsystem */
057ace5e 6319int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6320{
6321 unsigned long tmp = 0;
6322
6323 switch (bits->width) {
6324 case 1: {
6325 u8 tmp8 = 0;
6326 pci_read_config_byte(pdev, bits->reg, &tmp8);
6327 tmp = tmp8;
6328 break;
6329 }
6330 case 2: {
6331 u16 tmp16 = 0;
6332 pci_read_config_word(pdev, bits->reg, &tmp16);
6333 tmp = tmp16;
6334 break;
6335 }
6336 case 4: {
6337 u32 tmp32 = 0;
6338 pci_read_config_dword(pdev, bits->reg, &tmp32);
6339 tmp = tmp32;
6340 break;
6341 }
6342
6343 default:
6344 return -EINVAL;
6345 }
6346
6347 tmp &= bits->mask;
6348
6349 return (tmp == bits->val) ? 1 : 0;
6350}
9b847548 6351
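/*
 * Illustrative sketch: LLDs describe an enable bit in PCI config
 * space with a pci_bits tuple and test it before touching a
 * channel.  The register offset and bit below are hypothetical.
 */
static const struct pci_bits my_enable_bits = {
	0x41,	/* reg: hypothetical per-channel enable register */
	1,	/* width: one byte */
	0x80,	/* mask: bit 7 */
	0x80,	/* val: bit 7 set means enabled */
};

static int my_channel_enabled(struct pci_dev *pdev)
{
	return pci_test_config_bits(pdev, &my_enable_bits);
}
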
6ffa01d8 6352#ifdef CONFIG_PM
3c5100c1 6353void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6354{
6355 pci_save_state(pdev);
4c90d971 6356 pci_disable_device(pdev);
500530f6 6357
4c90d971 6358 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6359 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6360}
6361
553c4aa6 6362int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6363{
553c4aa6
TH
6364 int rc;
6365
9b847548
JA
6366 pci_set_power_state(pdev, PCI_D0);
6367 pci_restore_state(pdev);
553c4aa6 6368
b878ca5d 6369 rc = pcim_enable_device(pdev);
553c4aa6
TH
6370 if (rc) {
6371 dev_printk(KERN_ERR, &pdev->dev,
6372 "failed to enable device after resume (%d)\n", rc);
6373 return rc;
6374 }
6375
9b847548 6376 pci_set_master(pdev);
553c4aa6 6377 return 0;
500530f6
TH
6378}
6379
3c5100c1 6380int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6381{
cca3974e 6382 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6383 int rc = 0;
6384
cca3974e 6385 rc = ata_host_suspend(host, mesg);
500530f6
TH
6386 if (rc)
6387 return rc;
6388
3c5100c1 6389 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6390
6391 return 0;
6392}
6393
6394int ata_pci_device_resume(struct pci_dev *pdev)
6395{
cca3974e 6396 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6397 int rc;
500530f6 6398
553c4aa6
TH
6399 rc = ata_pci_device_do_resume(pdev);
6400 if (rc == 0)
6401 ata_host_resume(host);
6402 return rc;
9b847548 6403}
6ffa01d8
TH
6404#endif /* CONFIG_PM */
6405
1da177e4
LT
6406#endif /* CONFIG_PCI */
6407
6408
1da177e4
LT
6409static int __init ata_init(void)
6410{
a8601e5f 6411 ata_probe_timeout *= HZ;
1da177e4
LT
6412 ata_wq = create_workqueue("ata");
6413 if (!ata_wq)
6414 return -ENOMEM;
6415
453b07ac
TH
6416 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6417 if (!ata_aux_wq) {
6418 destroy_workqueue(ata_wq);
6419 return -ENOMEM;
6420 }
6421
1da177e4
LT
6422 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6423 return 0;
6424}
6425
6426static void __exit ata_exit(void)
6427{
6428 destroy_workqueue(ata_wq);
453b07ac 6429 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6430}
6431
a4625085 6432subsys_initcall(ata_init);
1da177e4
LT
6433module_exit(ata_exit);
6434
67846b30 6435static unsigned long ratelimit_time;
34af946a 6436static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6437
6438int ata_ratelimit(void)
6439{
6440 int rc;
6441 unsigned long flags;
6442
6443 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6444
6445 if (time_after(jiffies, ratelimit_time)) {
6446 rc = 1;
6447 ratelimit_time = jiffies + (HZ/5);
6448 } else
6449 rc = 0;
6450
6451 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6452
6453 return rc;
6454}
6455
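/*
 * Illustrative sketch: gate a noisy interrupt-path message behind
 * ata_ratelimit(), which permits at most one message per HZ/5
 * jiffies (roughly five per second).  Hypothetical helper.
 */
static void my_warn_spurious_irq(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"ignoring spurious interrupt\n");
}
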
c22daff4
TH
6456/**
6457 * ata_wait_register - wait until register value changes
6458 * @reg: IO-mapped register
6459 * @mask: Mask to apply to read register value
6460 * @val: Wait condition
6461 * @interval_msec: polling interval in milliseconds
6462 * @timeout_msec: timeout in milliseconds
6463 *
6464 * Waiting for some bits of register to change is a common
6465 * operation for ATA controllers. This function reads 32bit LE
6466 * IO-mapped register @reg and tests for the following condition.
6467 *
6468 * (*@reg & mask) != val
6469 *
6470 * If the condition is met, it returns; otherwise, the process is
6471 * repeated after @interval_msec until timeout.
6472 *
6473 * LOCKING:
6474 * Kernel thread context (may sleep)
6475 *
6476 * RETURNS:
6477 * The final register value.
6478 */
6479u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6480 unsigned long interval_msec,
6481 unsigned long timeout_msec)
6482{
6483 unsigned long timeout;
6484 u32 tmp;
6485
6486 tmp = ioread32(reg);
6487
6488 /* Calculate timeout _after_ the first read to make sure
6489 * preceding writes reach the controller before starting to
6490 * eat away the timeout.
6491 */
6492 timeout = jiffies + (timeout_msec * HZ) / 1000;
6493
6494 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6495 msleep(interval_msec);
6496 tmp = ioread32(reg);
6497 }
6498
6499 return tmp;
6500}
6501
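/*
 * Illustrative sketch: wait up to a second for BSY to clear in an
 * MMIO status register, polling every 10ms.  Note the inverted
 * sense: the wait continues while (reg & mask) == val.
 * "my_wait_idle" is hypothetical.
 */
static int my_wait_idle(void __iomem *status_reg)
{
	u32 status = ata_wait_register(status_reg, ATA_BUSY, ATA_BUSY,
				       10, 1000);

	return (status & ATA_BUSY) ? -EBUSY : 0;
}
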
dd5b06c4
TH
6502/*
6503 * Dummy port_ops
6504 */
6505static void ata_dummy_noret(struct ata_port *ap) { }
6506static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6507static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6508
6509static u8 ata_dummy_check_status(struct ata_port *ap)
6510{
6511 return ATA_DRDY;
6512}
6513
6514static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6515{
6516 return AC_ERR_SYSTEM;
6517}
6518
6519const struct ata_port_operations ata_dummy_port_ops = {
6520 .port_disable = ata_port_disable,
6521 .check_status = ata_dummy_check_status,
6522 .check_altstatus = ata_dummy_check_status,
6523 .dev_select = ata_noop_dev_select,
6524 .qc_prep = ata_noop_qc_prep,
6525 .qc_issue = ata_dummy_qc_issue,
6526 .freeze = ata_dummy_noret,
6527 .thaw = ata_dummy_noret,
6528 .error_handler = ata_dummy_noret,
6529 .post_internal_cmd = ata_dummy_qc_noret,
6530 .irq_clear = ata_dummy_noret,
6531 .port_start = ata_dummy_ret0,
6532 .port_stop = ata_dummy_noret,
6533};
6534
1da177e4
LT
6535/*
6536 * libata is essentially a library of internal helper functions for
6537 * low-level ATA host controller drivers. As such, the API/ABI is
6538 * likely to change as new drivers are added and updated.
6539 * Do not depend on ABI/API stability.
6540 */
6541
e9c83914
TH
6542EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6543EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6544EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6545EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
1da177e4
LT
6546EXPORT_SYMBOL_GPL(ata_std_bios_param);
6547EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6548EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6549EXPORT_SYMBOL_GPL(ata_host_alloc);
ecef7253 6550EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6551EXPORT_SYMBOL_GPL(ata_host_register);
1da177e4 6552EXPORT_SYMBOL_GPL(ata_device_add);
0529c159 6553EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6554EXPORT_SYMBOL_GPL(ata_sg_init);
6555EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6556EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6557EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6558EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6559EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6560EXPORT_SYMBOL_GPL(ata_tf_load);
6561EXPORT_SYMBOL_GPL(ata_tf_read);
6562EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6563EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 6564EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
6565EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6566EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6567EXPORT_SYMBOL_GPL(ata_check_status);
6568EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6569EXPORT_SYMBOL_GPL(ata_exec_command);
6570EXPORT_SYMBOL_GPL(ata_port_start);
1da177e4 6571EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 6572EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
6573EXPORT_SYMBOL_GPL(ata_data_xfer);
6574EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6575EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6576EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6577EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6578EXPORT_SYMBOL_GPL(ata_bmdma_start);
6579EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6580EXPORT_SYMBOL_GPL(ata_bmdma_status);
6581EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
6582EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6583EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6584EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6585EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6586EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6587EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6588EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6589EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7
TH
6590EXPORT_SYMBOL_GPL(sata_phy_debounce);
6591EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4
LT
6592EXPORT_SYMBOL_GPL(sata_phy_reset);
6593EXPORT_SYMBOL_GPL(__sata_phy_reset);
6594EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6595EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6596EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6597EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804
TH
6598EXPORT_SYMBOL_GPL(sata_std_hardreset);
6599EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6600EXPORT_SYMBOL_GPL(ata_dev_classify);
6601EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6602EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6603EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6604EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6605EXPORT_SYMBOL_GPL(ata_busy_sleep);
86e45b6b 6606EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
6607EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6608EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6609EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6610EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6611EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6612EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
6613EXPORT_SYMBOL_GPL(sata_scr_valid);
6614EXPORT_SYMBOL_GPL(sata_scr_read);
6615EXPORT_SYMBOL_GPL(sata_scr_write);
6616EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6617EXPORT_SYMBOL_GPL(ata_port_online);
6618EXPORT_SYMBOL_GPL(ata_port_offline);
6ffa01d8 6619#ifdef CONFIG_PM
cca3974e
JG
6620EXPORT_SYMBOL_GPL(ata_host_suspend);
6621EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6622#endif /* CONFIG_PM */
6a62a04d
TH
6623EXPORT_SYMBOL_GPL(ata_id_string);
6624EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 6625EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6919a0a6 6626EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4
LT
6627EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6628
1bc4ccff 6629EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
6630EXPORT_SYMBOL_GPL(ata_timing_compute);
6631EXPORT_SYMBOL_GPL(ata_timing_merge);
6632
1da177e4
LT
6633#ifdef CONFIG_PCI
6634EXPORT_SYMBOL_GPL(pci_test_config_bits);
6635EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6636EXPORT_SYMBOL_GPL(ata_pci_init_one);
6637EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6638#ifdef CONFIG_PM
500530f6
TH
6639EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6640EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6641EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6642EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6643#endif /* CONFIG_PM */
67951ade
AC
6644EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6645EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6646#endif /* CONFIG_PCI */
9b847548 6647
6ffa01d8 6648#ifdef CONFIG_PM
9b847548
JA
6649EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6650EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6ffa01d8 6651#endif /* CONFIG_PM */
ece1d636 6652
ece1d636 6653EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03
TH
6654EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6655EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
6656EXPORT_SYMBOL_GPL(ata_port_freeze);
6657EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6658EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6659EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6660EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6661EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
6662EXPORT_SYMBOL_GPL(ata_irq_on);
6663EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6664EXPORT_SYMBOL_GPL(ata_irq_ack);
6665EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
a619f981 6666EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
6667
6668EXPORT_SYMBOL_GPL(ata_cable_40wire);
6669EXPORT_SYMBOL_GPL(ata_cable_80wire);
6670EXPORT_SYMBOL_GPL(ata_cable_unknown);
6671EXPORT_SYMBOL_GPL(ata_cable_sata);