/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION	"2.10"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
						   bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

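/*
 * Illustrative sketch (not part of the original file): round-tripping a
 * taskfile through the two FIS helpers above. The "example_" name is
 * hypothetical.
 */
static void example_fis_round_trip(const struct ata_taskfile *tf_in,
				   struct ata_taskfile *tf_out)
{
	u8 fis[20];

	ata_tf_to_fis(tf_in, fis, 0);	/* pmp 0; builds an H2D register FIS */
	ata_tf_from_fis(fis, tf_out);	/* recovers the taskfile fields */
}
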
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
	    likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

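		/* Worked example (illustrative, not in the original): with
		 * dev->sectors == 63 and dev->heads == 16, block 4106 gives
		 * track = 4106 / 63 = 65, cyl = 65 / 16 = 4,
		 * head = 65 % 16 = 1 and sect = 4106 % 63 + 1 = 12.
		 */
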
		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

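/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * the pack/unpack pair above is a lossless round trip for in-range bits.
 */
static void example_xfermask_round_trip(void)
{
	unsigned int xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* here pio == 0x1f, mwdma == 0x07 and udma == 0x3f again */
}
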
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

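/*
 * Illustrative sketch (hypothetical helper): a PIO0-4 device mask packed
 * with ata_pack_xfermask() prints as "PIO4", since only the highest bit
 * of the mask is reported.
 */
static void example_print_fastest_mode(void)
{
	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0, 0);

	printk(KERN_DEBUG "fastest mode: %s\n", ata_mode_string(xfer_mask));
}
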
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		dev->class++;
	}
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

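/*
 * Illustrative sketch (not in the original file): feeding the ATAPI
 * signature through the classifier above.
 */
static unsigned int example_classify_atapi_sig(void)
{
	struct ata_taskfile tf;

	memset(&tf, 0, sizeof(tf));
	tf.lbam = 0x14;
	tf.lbah = 0xeb;

	return ata_dev_classify(&tf);	/* yields ATA_DEV_ATAPI */
}
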
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

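/*
 * Illustrative sketch (not in the original file): extracting the model
 * string from an IDENTIFY page. The offset/length assume the IDENTIFY
 * layout where words 27-46 hold the model number.
 */
static void example_model_string(const u16 *id)
{
	unsigned char model[41];	/* 40 chars + NUL; @len must be odd */

	ata_id_c_string(id, model, 27, sizeof(model));
}
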
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

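/*
 * Illustrative sketch (assumes the traditional 512-byte logical sector):
 * turning the sector count above into a byte capacity.
 */
static u64 example_capacity_bytes(const u16 *id)
{
	return ata_id_n_sectors(id) * 512ULL;
}
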
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
				"device %u, wait %u\n", ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

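/*
 * Illustrative sketch (hypothetical helper): splitting the mask computed
 * above back into its component masks, e.g. to log what a device
 * advertises.
 */
static void example_log_xfermask(const u16 *id)
{
	unsigned int pio, mwdma, udma;

	ata_unpack_xfermask(ata_id_xfermask(id), &pio, &mwdma, &udma);
	printk(KERN_DEBUG "pio %#x mwdma %#x udma %#x\n", pio, mwdma, udma);
}
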
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				"zero err_mask for failed "
				"internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}

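/*
 * Illustrative sketch (hypothetical callback, mirroring the usual
 * ->set_piomode signature): how a controller driver might consult the
 * helper above.
 */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	if (ata_pio_need_iordy(adev)) {
		/* program the chip with IORDY sampling enabled here */
	}
}
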
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;
	tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
	int i;

	if (ap->scsi_host) {
		unsigned int len = 0;

		for (i = 0; i < ATA_MAX_DEVICES; i++)
			len = max(len, ap->device[i].cdb_len);

		ap->scsi_host->max_cmd_len = len;
	}
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
			       __FUNCTION__, ap->id, dev->devno);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
					ap->id, dev->devno);
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "%s, "
					"max %s, %Lu sectors: %s %s\n",
					revbuf,
					ata_mode_string(xfer_mask),
					(unsigned long long)dev->n_sectors,
					lba_desc, ncq_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "%s, "
					"max %s, %Lu sectors: CHS %u/%u/%u\n",
					revbuf,
					ata_mode_string(xfer_mask),
					(unsigned long long)dev->n_sectors,
					dev->cylinders, dev->heads,
					dev->sectors);
		}

		if (dev->id[59] & 0x100) {
			dev->multi_count = dev->id[59] & 0xff;
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO,
					       "ata%u: dev %u multi count %u\n",
					       ap->id, dev->devno, dev->multi_count);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}

1729/**
1730 * ata_bus_probe - Reset and probe ATA bus
1731 * @ap: Bus to probe
1732 *
0cba632b
JG
1733 * Master ATA bus probing function. Initiates a hardware-dependent
1734 * bus reset, then attempts to identify any devices found on
1735 * the bus.
1736 *
1da177e4 1737 * LOCKING:
0cba632b 1738 * PCI/etc. bus probe sem.
1da177e4
LT
1739 *
1740 * RETURNS:
96072e69 1741 * Zero on success, negative errno otherwise.
1da177e4
LT
1742 */
1743
80289167 1744int ata_bus_probe(struct ata_port *ap)
1da177e4 1745{
28ca5c57 1746 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1
TH
1747 int tries[ATA_MAX_DEVICES];
1748 int i, rc, down_xfermask;
e82cbdb9 1749 struct ata_device *dev;
1da177e4 1750
28ca5c57 1751 ata_port_probe(ap);
c19ba8af 1752
14d2bac1
TH
1753 for (i = 0; i < ATA_MAX_DEVICES; i++)
1754 tries[i] = ATA_PROBE_MAX_TRIES;
1755
1756 retry:
1757 down_xfermask = 0;
1758
2044470c 1759 /* reset and determine device classes */
52783c5d 1760 ap->ops->phy_reset(ap);
2061a47a 1761
52783c5d
TH
1762 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1763 dev = &ap->device[i];
c19ba8af 1764
52783c5d
TH
1765 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1766 dev->class != ATA_DEV_UNKNOWN)
1767 classes[dev->devno] = dev->class;
1768 else
1769 classes[dev->devno] = ATA_DEV_NONE;
2044470c 1770
52783c5d 1771 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 1772 }
1da177e4 1773
52783c5d 1774 ata_port_probe(ap);
2044470c 1775
b6079ca4
AC
1776 /* after the reset the device state is PIO 0 and the controller
1777 state is undefined. Record the mode */
1778
1779 for (i = 0; i < ATA_MAX_DEVICES; i++)
1780 ap->device[i].pio_mode = XFER_PIO_0;
1781
28ca5c57 1782 /* read IDENTIFY page and configure devices */
1da177e4 1783 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e82cbdb9 1784 dev = &ap->device[i];
28ca5c57 1785
ec573755
TH
1786 if (tries[i])
1787 dev->class = classes[i];
ffeae418 1788
14d2bac1 1789 if (!ata_dev_enabled(dev))
ffeae418 1790 continue;
ffeae418 1791
bff04647
TH
1792 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
1793 dev->id);
14d2bac1
TH
1794 if (rc)
1795 goto fail;
1796
efdaedc4
TH
1797 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1798 rc = ata_dev_configure(dev);
1799 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
1800 if (rc)
1801 goto fail;
1da177e4
LT
1802 }
1803
e82cbdb9 1804 /* configure transfer mode */
3adcebb2 1805 rc = ata_set_mode(ap, &dev);
51713d35
TH
1806 if (rc) {
1807 down_xfermask = 1;
1808 goto fail;
e82cbdb9 1809 }
1da177e4 1810
e82cbdb9
TH
1811 for (i = 0; i < ATA_MAX_DEVICES; i++)
1812 if (ata_dev_enabled(&ap->device[i]))
1813 return 0;
1da177e4 1814
e82cbdb9
TH
1815 /* no device present, disable port */
1816 ata_port_disable(ap);
1da177e4 1817 ap->ops->port_disable(ap);
96072e69 1818 return -ENODEV;
14d2bac1
TH
1819
1820 fail:
1821 switch (rc) {
1822 case -EINVAL:
1823 case -ENODEV:
1824 tries[dev->devno] = 0;
1825 break;
1826 case -EIO:
3c567b7d 1827 sata_down_spd_limit(ap);
14d2bac1
TH
1828 /* fall through */
1829 default:
1830 tries[dev->devno]--;
1831 if (down_xfermask &&
3373efd8 1832 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
14d2bac1
TH
1833 tries[dev->devno] = 0;
1834 }
1835
ec573755 1836 if (!tries[dev->devno]) {
3373efd8
TH
1837 ata_down_xfermask_limit(dev, 1);
1838 ata_dev_disable(dev);
ec573755
TH
1839 }
1840
14d2bac1 1841 goto retry;
1da177e4
LT
1842}
1843
1844/**
0cba632b
JG
1845 * ata_port_probe - Mark port as enabled
1846 * @ap: Port for which we indicate enablement
1da177e4 1847 *
0cba632b
JG
1848 * Modify @ap data structure such that the system
1849 * thinks that the entire port is enabled.
1850 *
cca3974e 1851 * LOCKING: host lock, or some other form of
0cba632b 1852 * serialization.
1da177e4
LT
1853 */
1854
1855void ata_port_probe(struct ata_port *ap)
1856{
198e0fed 1857 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1858}
1859
3be680b7
TH
1860/**
1861 * sata_print_link_status - Print SATA link status
1862 * @ap: SATA port to printk link status about
1863 *
1864 * This function prints link speed and status of a SATA link.
1865 *
1866 * LOCKING:
1867 * None.
1868 */
1869static void sata_print_link_status(struct ata_port *ap)
1870{
6d5f9732 1871 u32 sstatus, scontrol, tmp;
3be680b7 1872
81952c54 1873 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1874 return;
81952c54 1875 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1876
81952c54 1877 if (ata_port_online(ap)) {
3be680b7 1878 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1879 ata_port_printk(ap, KERN_INFO,
1880 "SATA link up %s (SStatus %X SControl %X)\n",
1881 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1882 } else {
f15a1daf
TH
1883 ata_port_printk(ap, KERN_INFO,
1884 "SATA link down (SStatus %X SControl %X)\n",
1885 sstatus, scontrol);
3be680b7
TH
1886 }
1887}
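/* Illustrative aside, not part of libata: the SStatus decoding used by
 * sata_print_link_status() above is plain bit slicing -- DET lives in
 * bits 0-3 and SPD in bits 4-7. A minimal userspace sketch of the same
 * arithmetic, with a made-up register value:
 */
#include <stdio.h>

int main(void)
{
	unsigned int sstatus = 0x123;		/* hypothetical SStatus */
	unsigned int det = sstatus & 0xf;	/* detection state */
	unsigned int spd = (sstatus >> 4) & 0xf; /* negotiated speed */

	/* DET == 3 means a device is present and phy comm is established */
	printf("DET=%u SPD=%u -> %s\n", det, spd,
	       det == 3 ? "link up" : "link down");
	return 0;
}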
1888
1da177e4 1889/**
780a87f7
JG
1890 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1891 * @ap: SATA port associated with target SATA PHY.
1da177e4 1892 *
780a87f7
JG
1893 * This function issues commands to standard SATA Sxxx
1894 * PHY registers, to wake up the phy (and device), and
1895 * clear any reset condition.
1da177e4
LT
1896 *
1897 * LOCKING:
0cba632b 1898 * PCI/etc. bus probe sem.
1da177e4
LT
1899 *
1900 */
1901void __sata_phy_reset(struct ata_port *ap)
1902{
1903 u32 sstatus;
1904 unsigned long timeout = jiffies + (HZ * 5);
1905
1906 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 1907 /* issue phy wake/reset */
81952c54 1908 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
1909 /* Couldn't find anything in SATA I/II specs, but
1910 * AHCI-1.1 10.4.2 says at least 1 ms. */
1911 mdelay(1);
1da177e4 1912 }
81952c54
TH
1913 /* phy wake/clear reset */
1914 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
1915
1916 /* wait for phy to become ready, if necessary */
1917 do {
1918 msleep(200);
81952c54 1919 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
1920 if ((sstatus & 0xf) != 1)
1921 break;
1922 } while (time_before(jiffies, timeout));
1923
3be680b7
TH
1924 /* print link status */
1925 sata_print_link_status(ap);
656563e3 1926
3be680b7 1927 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 1928 if (!ata_port_offline(ap))
1da177e4 1929 ata_port_probe(ap);
3be680b7 1930 else
1da177e4 1931 ata_port_disable(ap);
1da177e4 1932
198e0fed 1933 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1934 return;
1935
1936 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1937 ata_port_disable(ap);
1938 return;
1939 }
1940
1941 ap->cbl = ATA_CBL_SATA;
1942}
1943
1944/**
780a87f7
JG
1945 * sata_phy_reset - Reset SATA bus.
1946 * @ap: SATA port associated with target SATA PHY.
1da177e4 1947 *
780a87f7
JG
1948 * This function resets the SATA bus, and then probes
1949 * the bus for devices.
1da177e4
LT
1950 *
1951 * LOCKING:
0cba632b 1952 * PCI/etc. bus probe sem.
1da177e4
LT
1953 *
1954 */
1955void sata_phy_reset(struct ata_port *ap)
1956{
1957 __sata_phy_reset(ap);
198e0fed 1958 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1959 return;
1960 ata_bus_reset(ap);
1961}
1962
ebdfca6e
AC
1963/**
1964 * ata_dev_pair - return other device on cable
ebdfca6e
AC
1965 * @adev: device
1966 *
 1967 * Obtain the other device on the same cable, or NULL if none
 1968 * is present.
1969 */
2e9edbf8 1970
3373efd8 1971struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 1972{
3373efd8 1973 struct ata_port *ap = adev->ap;
ebdfca6e 1974 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 1975 if (!ata_dev_enabled(pair))
ebdfca6e
AC
1976 return NULL;
1977 return pair;
1978}
1979
1da177e4 1980/**
780a87f7
JG
1981 * ata_port_disable - Disable port.
1982 * @ap: Port to be disabled.
1da177e4 1983 *
780a87f7
JG
1984 * Modify @ap data structure such that the system
1985 * thinks that the entire port is disabled, and should
1986 * never attempt to probe or communicate with devices
1987 * on this port.
1988 *
cca3974e 1989 * LOCKING: host lock, or some other form of
780a87f7 1990 * serialization.
1da177e4
LT
1991 */
1992
1993void ata_port_disable(struct ata_port *ap)
1994{
1995 ap->device[0].class = ATA_DEV_NONE;
1996 ap->device[1].class = ATA_DEV_NONE;
198e0fed 1997 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
1998}
1999
1c3fae4d 2000/**
3c567b7d 2001 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2002 * @ap: Port to adjust SATA spd limit for
2003 *
2004 * Adjust SATA spd limit of @ap downward. Note that this
2005 * function only adjusts the limit. The change must be applied
3c567b7d 2006 * using sata_set_spd().
1c3fae4d
TH
2007 *
2008 * LOCKING:
2009 * Inherited from caller.
2010 *
2011 * RETURNS:
2012 * 0 on success, negative errno on failure
2013 */
3c567b7d 2014int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2015{
81952c54
TH
2016 u32 sstatus, spd, mask;
2017 int rc, highbit;
1c3fae4d 2018
81952c54
TH
2019 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2020 if (rc)
2021 return rc;
1c3fae4d
TH
2022
2023 mask = ap->sata_spd_limit;
2024 if (mask <= 1)
2025 return -EINVAL;
2026 highbit = fls(mask) - 1;
2027 mask &= ~(1 << highbit);
2028
81952c54 2029 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2030 if (spd <= 1)
2031 return -EINVAL;
2032 spd--;
2033 mask &= (1 << spd) - 1;
2034 if (!mask)
2035 return -EINVAL;
2036
2037 ap->sata_spd_limit = mask;
2038
f15a1daf
TH
2039 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2040 sata_spd_string(fls(mask)));
1c3fae4d
TH
2041
2042 return 0;
2043}
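/* Illustrative aside, not part of libata: sata_spd_limit is a bitmask
 * of allowed speeds (bit 0 = 1.5Gbps, bit 1 = 3.0Gbps, ...). Stepping
 * down clears the highest allowed bit, then masks off everything at or
 * above the currently negotiated speed, exactly as above. Userspace
 * sketch with invented values:
 */
#include <stdio.h>

static int my_fls(unsigned int v)	/* highest set bit, 1-based */
{
	int r = 0;

	while (v) {
		r++;
		v >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int mask = 0x3;	/* limit currently allows gen1+gen2 */
	unsigned int spd = 2;		/* link negotiated gen2 */

	mask &= ~(1u << (my_fls(mask) - 1));	/* drop the fastest speed */
	mask &= (1u << (spd - 1)) - 1;		/* stay below current spd */

	printf("new limit mask: 0x%x\n", mask);	/* 0x1 -> gen1 only */
	return 0;
}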
2044
3c567b7d 2045static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2046{
2047 u32 spd, limit;
2048
2049 if (ap->sata_spd_limit == UINT_MAX)
2050 limit = 0;
2051 else
2052 limit = fls(ap->sata_spd_limit);
2053
2054 spd = (*scontrol >> 4) & 0xf;
2055 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2056
2057 return spd != limit;
2058}
2059
2060/**
3c567b7d 2061 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2062 * @ap: Port in question
2063 *
2064 * Test whether the spd limit in SControl matches
2065 * @ap->sata_spd_limit. This function is used to determine
2066 * whether hardreset is necessary to apply SATA spd
2067 * configuration.
2068 *
2069 * LOCKING:
2070 * Inherited from caller.
2071 *
2072 * RETURNS:
2073 * 1 if SATA spd configuration is needed, 0 otherwise.
2074 */
3c567b7d 2075int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2076{
2077 u32 scontrol;
2078
81952c54 2079 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2080 return 0;
2081
3c567b7d 2082 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2083}
2084
2085/**
3c567b7d 2086 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2087 * @ap: Port to set SATA spd for
2088 *
2089 * Set SATA spd of @ap according to sata_spd_limit.
2090 *
2091 * LOCKING:
2092 * Inherited from caller.
2093 *
2094 * RETURNS:
2095 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2096 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2097 */
3c567b7d 2098int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2099{
2100 u32 scontrol;
81952c54 2101 int rc;
1c3fae4d 2102
81952c54
TH
2103 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2104 return rc;
1c3fae4d 2105
3c567b7d 2106 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2107 return 0;
2108
81952c54
TH
2109 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2110 return rc;
2111
1c3fae4d
TH
2112 return 1;
2113}
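/* Illustrative aside, not part of libata: __sata_set_spd_needed() above
 * rewrites only the SPD field (bits 4-7) of SControl, leaving DET and
 * IPM untouched. The bit manipulation, standalone and with made-up
 * register contents:
 */
#include <stdio.h>

int main(void)
{
	unsigned int scontrol = 0x300;	/* hypothetical SControl, SPD = 0 */
	unsigned int limit = 1;		/* cap the link at gen1 */

	scontrol = (scontrol & ~0xf0u) | ((limit & 0xf) << 4);

	printf("SControl: 0x%x\n", scontrol);	/* 0x310 */
	return 0;
}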
2114
452503f9
AC
2115/*
2116 * This mode timing computation functionality is ported over from
2117 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2118 */
2119/*
b352e57d 2120 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2121 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2122 * for UDMA6, which is currently supported only by Maxtor drives.
2123 *
2124 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2125 */
2126
2127static const struct ata_timing ata_timing[] = {
2128
2129 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2130 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2131 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2132 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2133
b352e57d
AC
2134 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2135 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2136 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2137 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2138 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2139
2140/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2141
452503f9
AC
2142 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2143 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2144 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2145
452503f9
AC
2146 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2147 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2148 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2149
b352e57d
AC
2150 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2151 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2152 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2153 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2154
2155 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2156 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2157 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2158
2159/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2160
2161 { 0xFF }
2162};
2163
2164#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2165#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2166
2167static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2168{
2169 q->setup = EZ(t->setup * 1000, T);
2170 q->act8b = EZ(t->act8b * 1000, T);
2171 q->rec8b = EZ(t->rec8b * 1000, T);
2172 q->cyc8b = EZ(t->cyc8b * 1000, T);
2173 q->active = EZ(t->active * 1000, T);
2174 q->recover = EZ(t->recover * 1000, T);
2175 q->cycle = EZ(t->cycle * 1000, T);
2176 q->udma = EZ(t->udma * 1000, UT);
2177}
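/* Illustrative aside, not part of libata: ENOUGH() is a ceiling
 * division -- it converts a duration into the smallest number of clock
 * periods at least that long -- and EZ() keeps zero ("unspecified") as
 * zero. In the quantize step above, timings in ns are scaled by 1000
 * and T/UT are clock periods; the units below are an assumption for
 * the sketch:
 */
#include <stdio.h>

#define ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)
#define EZ(v, unit)	((v) ? ENOUGH(v, unit) : 0)

int main(void)
{
	int T = 30000;	/* assumed clock period in ps (33MHz PCI) */

	/* a 70ns setup time needs ceil(70000/30000) = 3 clock periods */
	printf("setup: %d clocks\n", EZ(70 * 1000, T));
	printf("zero stays zero: %d\n", EZ(0, T));
	return 0;
}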
2178
2179void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2180 struct ata_timing *m, unsigned int what)
2181{
2182 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2183 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2184 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2185 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2186 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2187 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2188 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2189 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2190}
2191
2192static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2193{
2194 const struct ata_timing *t;
2195
2196 for (t = ata_timing; t->mode != speed; t++)
91190758 2197 if (t->mode == 0xFF)
452503f9 2198 return NULL;
2e9edbf8 2199 return t;
452503f9
AC
2200}
2201
2202int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2203 struct ata_timing *t, int T, int UT)
2204{
2205 const struct ata_timing *s;
2206 struct ata_timing p;
2207
2208 /*
2e9edbf8 2209 * Find the mode.
75b1f2f8 2210 */
452503f9
AC
2211
2212 if (!(s = ata_timing_find_mode(speed)))
2213 return -EINVAL;
2214
75b1f2f8
AL
2215 memcpy(t, s, sizeof(*s));
2216
452503f9
AC
2217 /*
2218 * If the drive is an EIDE drive, it can tell us it needs extended
2219 * PIO/MW_DMA cycle timing.
2220 */
2221
2222 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2223 memset(&p, 0, sizeof(p));
2224 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2225 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2226 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2227 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2228 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2229 }
2230 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2231 }
2232
2233 /*
2234 * Convert the timing to bus clock counts.
2235 */
2236
75b1f2f8 2237 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2238
2239 /*
c893a3ae
RD
2240 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
 2241 * S.M.A.R.T. and some other commands. We have to ensure that the
 2242 * DMA cycle timing is slower than or equal to the fastest PIO timing.
452503f9
AC
2243 */
2244
fd3367af 2245 if (speed > XFER_PIO_6) {
452503f9
AC
2246 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2247 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2248 }
2249
2250 /*
c893a3ae 2251 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2252 */
2253
2254 if (t->act8b + t->rec8b < t->cyc8b) {
2255 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2256 t->rec8b = t->cyc8b - t->act8b;
2257 }
2258
2259 if (t->active + t->recover < t->cycle) {
2260 t->active += (t->cycle - (t->active + t->recover)) / 2;
2261 t->recover = t->cycle - t->active;
2262 }
2263
2264 return 0;
2265}
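/* Illustrative aside, not part of libata: the final fixup in
 * ata_timing_compute() stretches active and recovery so that they fill
 * the whole cycle, splitting the slack roughly in half. With made-up
 * clock counts:
 */
#include <stdio.h>

int main(void)
{
	int active = 3, recover = 2, cycle = 8;	/* hypothetical counts */

	if (active + recover < cycle) {
		active += (cycle - (active + recover)) / 2;
		recover = cycle - active;
	}

	printf("active=%d recover=%d sum=%d\n",
	       active, recover, active + recover);	/* 4, 4, 8 */
	return 0;
}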
2266
cf176e1a
TH
2267/**
2268 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a
TH
2269 * @dev: Device to adjust xfer masks
2270 * @force_pio0: Force PIO0
2271 *
2272 * Adjust xfer masks of @dev downward. Note that this function
2273 * does not apply the change. Invoking ata_set_mode() afterwards
2274 * will apply the limit.
2275 *
2276 * LOCKING:
2277 * Inherited from caller.
2278 *
2279 * RETURNS:
2280 * 0 on success, negative errno on failure
2281 */
3373efd8 2282int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
cf176e1a
TH
2283{
2284 unsigned long xfer_mask;
2285 int highbit;
2286
2287 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2288 dev->udma_mask);
2289
2290 if (!xfer_mask)
2291 goto fail;
2292 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2293 if (xfer_mask & ATA_MASK_UDMA)
2294 xfer_mask &= ~ATA_MASK_MWDMA;
2295
2296 highbit = fls(xfer_mask) - 1;
2297 xfer_mask &= ~(1 << highbit);
2298 if (force_pio0)
2299 xfer_mask &= 1 << ATA_SHIFT_PIO;
2300 if (!xfer_mask)
2301 goto fail;
2302
2303 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2304 &dev->udma_mask);
2305
f15a1daf
TH
2306 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2307 ata_mode_string(xfer_mask));
cf176e1a
TH
2308
2309 return 0;
2310
2311 fail:
2312 return -EINVAL;
2313}
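/* Illustrative aside, not part of libata: because the pio/mwdma/udma
 * masks are packed into a single word, "one step slower" is just
 * clearing the highest set bit, and the UDMA->PIO rule above clears
 * the whole MWDMA field first. The field offsets below are assumptions
 * for this sketch only, not libata's headers:
 */
#include <stdio.h>

#define SHIFT_PIO	0
#define SHIFT_MWDMA	8
#define SHIFT_UDMA	16
#define MASK_MWDMA	(0xffu << SHIFT_MWDMA)
#define MASK_UDMA	(0xffu << SHIFT_UDMA)

static int my_fls(unsigned int v)	/* highest set bit, 1-based */
{
	int r = 0;

	while (v) {
		r++;
		v >>= 1;
	}
	return r;
}

int main(void)
{
	/* device supports PIO0-4, MWDMA0-2, UDMA0-2 */
	unsigned int xfer_mask = (0x1fu << SHIFT_PIO) |
				 (0x07u << SHIFT_MWDMA) |
				 (0x07u << SHIFT_UDMA);

	if (xfer_mask & MASK_UDMA)		/* skip MWDMA entirely */
		xfer_mask &= ~MASK_MWDMA;
	xfer_mask &= ~(1u << (my_fls(xfer_mask) - 1));	/* drop fastest */

	printf("new mask: 0x%06x\n", xfer_mask);	/* UDMA2 gone */
	return 0;
}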
2314
3373efd8 2315static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2316{
baa1e78a 2317 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2318 unsigned int err_mask;
2319 int rc;
1da177e4 2320
e8384607 2321 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2322 if (dev->xfer_shift == ATA_SHIFT_PIO)
2323 dev->flags |= ATA_DFLAG_PIO;
2324
3373efd8 2325 err_mask = ata_dev_set_xfermode(dev);
83206a29 2326 if (err_mask) {
f15a1daf
TH
2327 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2328 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2329 return -EIO;
2330 }
1da177e4 2331
baa1e78a 2332 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2333 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2334 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2335 if (rc)
83206a29 2336 return rc;
48a8a14f 2337
23e71c3d
TH
2338 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2339 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2340
f15a1daf
TH
2341 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2342 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2343 return 0;
1da177e4
LT
2344}
2345
1da177e4
LT
2346/**
2347 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2348 * @ap: port on which timings will be programmed
e82cbdb9 2349 * @r_failed_dev: out parameter for failed device
1da177e4 2350 *
e82cbdb9
TH
2351 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2352 * ata_set_mode() fails, pointer to the failing device is
2353 * returned in @r_failed_dev.
780a87f7 2354 *
1da177e4 2355 * LOCKING:
0cba632b 2356 * PCI/etc. bus probe sem.
e82cbdb9
TH
2357 *
2358 * RETURNS:
2359 * 0 on success, negative errno otherwise
1da177e4 2360 */
1ad8e7f9 2361int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2362{
e8e0619f 2363 struct ata_device *dev;
e82cbdb9 2364 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2365
3adcebb2 2366 /* has private set_mode? */
b229a7b0
AC
2367 if (ap->ops->set_mode)
2368 return ap->ops->set_mode(ap, r_failed_dev);
3adcebb2 2369
a6d5a51c
TH
2370 /* step 1: calculate xfer_mask */
2371 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2372 unsigned int pio_mask, dma_mask;
a6d5a51c 2373
e8e0619f
TH
2374 dev = &ap->device[i];
2375
e1211e3f 2376 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2377 continue;
2378
3373efd8 2379 ata_dev_xfermask(dev);
1da177e4 2380
acf356b1
TH
2381 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2382 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2383 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2384 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2385
4f65977d 2386 found = 1;
5444a6f4
AC
2387 if (dev->dma_mode)
2388 used_dma = 1;
a6d5a51c 2389 }
4f65977d 2390 if (!found)
e82cbdb9 2391 goto out;
a6d5a51c
TH
2392
2393 /* step 2: always set host PIO timings */
e8e0619f
TH
2394 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2395 dev = &ap->device[i];
2396 if (!ata_dev_enabled(dev))
2397 continue;
2398
2399 if (!dev->pio_mode) {
f15a1daf 2400 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2401 rc = -EINVAL;
e82cbdb9 2402 goto out;
e8e0619f
TH
2403 }
2404
2405 dev->xfer_mode = dev->pio_mode;
2406 dev->xfer_shift = ATA_SHIFT_PIO;
2407 if (ap->ops->set_piomode)
2408 ap->ops->set_piomode(ap, dev);
2409 }
1da177e4 2410
a6d5a51c 2411 /* step 3: set host DMA timings */
e8e0619f
TH
2412 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2413 dev = &ap->device[i];
2414
2415 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2416 continue;
2417
2418 dev->xfer_mode = dev->dma_mode;
2419 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2420 if (ap->ops->set_dmamode)
2421 ap->ops->set_dmamode(ap, dev);
2422 }
1da177e4
LT
2423
2424 /* step 4: update devices' xfer mode */
83206a29 2425 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2426 dev = &ap->device[i];
1da177e4 2427
18d90deb 2428 /* don't update suspended devices' xfer mode */
02670bf3 2429 if (!ata_dev_ready(dev))
83206a29
TH
2430 continue;
2431
3373efd8 2432 rc = ata_dev_set_mode(dev);
5bbc53f4 2433 if (rc)
e82cbdb9 2434 goto out;
83206a29 2435 }
1da177e4 2436
e8e0619f
TH
2437 /* Record simplex status. If we selected DMA then the other
2438 * host channels are not permitted to do so.
5444a6f4 2439 */
cca3974e
JG
2440 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2441 ap->host->simplex_claimed = 1;
5444a6f4 2442
e8e0619f 2443 /* step 5: chip-specific finalisation */
1da177e4
LT
2444 if (ap->ops->post_set_mode)
2445 ap->ops->post_set_mode(ap);
2446
e82cbdb9
TH
2447 out:
2448 if (rc)
2449 *r_failed_dev = dev;
2450 return rc;
1da177e4
LT
2451}
2452
1fdffbce
JG
2453/**
2454 * ata_tf_to_host - issue ATA taskfile to host controller
2455 * @ap: port to which command is being issued
2456 * @tf: ATA taskfile register set
2457 *
2458 * Issues ATA taskfile register set to ATA host controller,
2459 * with proper synchronization with interrupt handler and
2460 * other threads.
2461 *
2462 * LOCKING:
cca3974e 2463 * spin_lock_irqsave(host lock)
1fdffbce
JG
2464 */
2465
2466static inline void ata_tf_to_host(struct ata_port *ap,
2467 const struct ata_taskfile *tf)
2468{
2469 ap->ops->tf_load(ap, tf);
2470 ap->ops->exec_command(ap, tf);
2471}
2472
1da177e4
LT
2473/**
2474 * ata_busy_sleep - sleep until BSY clears, or timeout
2475 * @ap: port containing status register to be polled
2476 * @tmout_pat: impatience timeout
2477 * @tmout: overall timeout
2478 *
780a87f7
JG
2479 * Sleep until ATA Status register bit BSY clears,
2480 * or a timeout occurs.
2481 *
d1adc1bb
TH
2482 * LOCKING:
2483 * Kernel thread context (may sleep).
2484 *
2485 * RETURNS:
2486 * 0 on success, -errno otherwise.
1da177e4 2487 */
d1adc1bb
TH
2488int ata_busy_sleep(struct ata_port *ap,
2489 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2490{
2491 unsigned long timer_start, timeout;
2492 u8 status;
2493
2494 status = ata_busy_wait(ap, ATA_BUSY, 300);
2495 timer_start = jiffies;
2496 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2497 while (status != 0xff && (status & ATA_BUSY) &&
2498 time_before(jiffies, timeout)) {
1da177e4
LT
2499 msleep(50);
2500 status = ata_busy_wait(ap, ATA_BUSY, 3);
2501 }
2502
d1adc1bb 2503 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2504 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2505 "port is slow to respond, please be patient "
2506 "(Status 0x%x)\n", status);
1da177e4
LT
2507
2508 timeout = timer_start + tmout;
d1adc1bb
TH
2509 while (status != 0xff && (status & ATA_BUSY) &&
2510 time_before(jiffies, timeout)) {
1da177e4
LT
2511 msleep(50);
2512 status = ata_chk_status(ap);
2513 }
2514
d1adc1bb
TH
2515 if (status == 0xff)
2516 return -ENODEV;
2517
1da177e4 2518 if (status & ATA_BUSY) {
f15a1daf 2519 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2520 "(%lu secs, Status 0x%x)\n",
2521 tmout / HZ, status);
d1adc1bb 2522 return -EBUSY;
1da177e4
LT
2523 }
2524
2525 return 0;
2526}
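/* Illustrative aside, not part of libata: ata_busy_sleep() above is a
 * "warn early, fail late" poll loop -- one short impatience timeout
 * that only prints a notice, then a longer hard timeout. The same
 * shape against a fake status register (all values invented):
 */
#include <stdio.h>

#define BUSY 0x80

static unsigned char fake_status(int tick)
{
	return tick < 7 ? BUSY : 0x50;	/* busy for the first 7 polls */
}

int main(void)
{
	int tick = 0, warn_at = 5, give_up_at = 20;
	unsigned char status = fake_status(tick);

	while ((status & BUSY) && tick < warn_at)
		status = fake_status(++tick);

	if (status & BUSY)
		printf("slow to respond, being patient...\n");

	while ((status & BUSY) && tick < give_up_at)
		status = fake_status(++tick);

	if (status & BUSY)
		printf("failed to respond\n");
	else
		printf("ready after %d polls\n", tick);
	return 0;
}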
2527
2528static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2529{
2530 struct ata_ioports *ioaddr = &ap->ioaddr;
2531 unsigned int dev0 = devmask & (1 << 0);
2532 unsigned int dev1 = devmask & (1 << 1);
2533 unsigned long timeout;
2534
2535 /* if device 0 was found in ata_devchk, wait for its
2536 * BSY bit to clear
2537 */
2538 if (dev0)
2539 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2540
2541 /* if device 1 was found in ata_devchk, wait for
2542 * register access, then wait for BSY to clear
2543 */
2544 timeout = jiffies + ATA_TMOUT_BOOT;
2545 while (dev1) {
2546 u8 nsect, lbal;
2547
2548 ap->ops->dev_select(ap, 1);
0d5ff566
TH
2549 nsect = ioread8(ioaddr->nsect_addr);
2550 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
2551 if ((nsect == 1) && (lbal == 1))
2552 break;
2553 if (time_after(jiffies, timeout)) {
2554 dev1 = 0;
2555 break;
2556 }
2557 msleep(50); /* give drive a breather */
2558 }
2559 if (dev1)
2560 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2561
2562 /* is all this really necessary? */
2563 ap->ops->dev_select(ap, 0);
2564 if (dev1)
2565 ap->ops->dev_select(ap, 1);
2566 if (dev0)
2567 ap->ops->dev_select(ap, 0);
2568}
2569
1da177e4
LT
2570static unsigned int ata_bus_softreset(struct ata_port *ap,
2571 unsigned int devmask)
2572{
2573 struct ata_ioports *ioaddr = &ap->ioaddr;
2574
2575 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2576
2577 /* software reset. causes dev0 to be selected */
0d5ff566
TH
2578 iowrite8(ap->ctl, ioaddr->ctl_addr);
2579 udelay(20); /* FIXME: flush */
2580 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2581 udelay(20); /* FIXME: flush */
2582 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2583
2584 /* spec mandates ">= 2ms" before checking status.
2585 * We wait 150ms, because that was the magic delay used for
2586 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
 2587 * between when the ATA command register is written and when
 2588 * status is first checked. Because waiting for "a while" before
2589 * checking status is fine, post SRST, we perform this magic
2590 * delay here as well.
09c7ad79
AC
2591 *
 2592 * Old drivers/ide uses the 2 ms rule and then waits for ready
1da177e4
LT
2593 */
2594 msleep(150);
2595
2e9edbf8 2596 /* Before we perform post reset processing we want to see if
298a41ca
TH
2597 * the bus shows 0xFF because the odd clown forgets the D7
2598 * pulldown resistor.
2599 */
d1adc1bb
TH
2600 if (ata_check_status(ap) == 0xFF)
2601 return 0;
09c7ad79 2602
1da177e4
LT
2603 ata_bus_post_reset(ap, devmask);
2604
2605 return 0;
2606}
2607
2608/**
2609 * ata_bus_reset - reset host port and associated ATA channel
2610 * @ap: port to reset
2611 *
2612 * This is typically the first time we actually start issuing
2613 * commands to the ATA channel. We wait for BSY to clear, then
2614 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2615 * result. Determine what devices, if any, are on the channel
2616 * by looking at the device 0/1 error register. Look at the signature
2617 * stored in each device's taskfile registers, to determine if
2618 * the device is ATA or ATAPI.
2619 *
2620 * LOCKING:
0cba632b 2621 * PCI/etc. bus probe sem.
cca3974e 2622 * Obtains host lock.
1da177e4
LT
2623 *
2624 * SIDE EFFECTS:
198e0fed 2625 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2626 */
2627
2628void ata_bus_reset(struct ata_port *ap)
2629{
2630 struct ata_ioports *ioaddr = &ap->ioaddr;
2631 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2632 u8 err;
aec5c3c1 2633 unsigned int dev0, dev1 = 0, devmask = 0;
1da177e4
LT
2634
2635 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2636
2637 /* determine if device 0/1 are present */
2638 if (ap->flags & ATA_FLAG_SATA_RESET)
2639 dev0 = 1;
2640 else {
2641 dev0 = ata_devchk(ap, 0);
2642 if (slave_possible)
2643 dev1 = ata_devchk(ap, 1);
2644 }
2645
2646 if (dev0)
2647 devmask |= (1 << 0);
2648 if (dev1)
2649 devmask |= (1 << 1);
2650
2651 /* select device 0 again */
2652 ap->ops->dev_select(ap, 0);
2653
2654 /* issue bus reset */
2655 if (ap->flags & ATA_FLAG_SRST)
aec5c3c1
TH
2656 if (ata_bus_softreset(ap, devmask))
2657 goto err_out;
1da177e4
LT
2658
2659 /*
2660 * determine by signature whether we have ATA or ATAPI devices
2661 */
b4dc7623 2662 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 2663 if ((slave_possible) && (err != 0x81))
b4dc7623 2664 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
2665
2666 /* re-enable interrupts */
83625006 2667 ap->ops->irq_on(ap);
1da177e4
LT
2668
2669 /* is double-select really necessary? */
2670 if (ap->device[1].class != ATA_DEV_NONE)
2671 ap->ops->dev_select(ap, 1);
2672 if (ap->device[0].class != ATA_DEV_NONE)
2673 ap->ops->dev_select(ap, 0);
2674
2675 /* if no devices were detected, disable this port */
2676 if ((ap->device[0].class == ATA_DEV_NONE) &&
2677 (ap->device[1].class == ATA_DEV_NONE))
2678 goto err_out;
2679
2680 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2681 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 2682 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2683 }
2684
2685 DPRINTK("EXIT\n");
2686 return;
2687
2688err_out:
f15a1daf 2689 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
2690 ap->ops->port_disable(ap);
2691
2692 DPRINTK("EXIT\n");
2693}
2694
d7bb4cc7
TH
2695/**
2696 * sata_phy_debounce - debounce SATA phy status
2697 * @ap: ATA port to debounce SATA phy status for
2698 * @params: timing parameters { interval, duratinon, timeout } in msec
2699 *
 2700 * Make sure SStatus of @ap reaches a stable state, determined by
 2701 * holding the same value where DET is not 1 for @duration polled
 2702 * every @interval, before @timeout. Timeout constrains the
 2703 * beginning of the stable state. Because, after hot unplugging,
 2704 * DET gets stuck at 1 on some controllers, this function waits
 2705 * until timeout and then returns 0 if DET is stable at 1.
2706 *
2707 * LOCKING:
2708 * Kernel thread context (may sleep)
2709 *
2710 * RETURNS:
2711 * 0 on success, -errno on failure.
2712 */
2713int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 2714{
d7bb4cc7
TH
2715 unsigned long interval_msec = params[0];
2716 unsigned long duration = params[1] * HZ / 1000;
2717 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2718 unsigned long last_jiffies;
2719 u32 last, cur;
2720 int rc;
2721
2722 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2723 return rc;
2724 cur &= 0xf;
2725
2726 last = cur;
2727 last_jiffies = jiffies;
2728
2729 while (1) {
2730 msleep(interval_msec);
2731 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2732 return rc;
2733 cur &= 0xf;
2734
2735 /* DET stable? */
2736 if (cur == last) {
2737 if (cur == 1 && time_before(jiffies, timeout))
2738 continue;
2739 if (time_after(jiffies, last_jiffies + duration))
2740 return 0;
2741 continue;
2742 }
2743
2744 /* unstable, start over */
2745 last = cur;
2746 last_jiffies = jiffies;
2747
2748 /* check timeout */
2749 if (time_after(jiffies, timeout))
2750 return -EBUSY;
2751 }
2752}
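/* Illustrative aside, not part of libata: the core of the debounce
 * loop above in miniature -- DET must hold the same value for
 * `duration` consecutive polls before we call it stable (the special
 * handling of DET stuck at 1 past the timeout is omitted here).
 * Synthetic samples:
 */
#include <stdio.h>

int main(void)
{
	int det[] = { 3, 1, 3, 3, 3, 3, 3, 3 };	/* made-up DET readings */
	int n = sizeof(det) / sizeof(det[0]);
	int duration = 3, last = det[0], stable_since = 0, i;

	for (i = 1; i < n; i++) {
		if (det[i] != last) {	/* unstable: start over */
			last = det[i];
			stable_since = i;
		} else if (i - stable_since >= duration) {
			printf("stable at DET=%d after %d polls\n", last, i);
			return 0;
		}
	}
	printf("never stabilized\n");
	return 0;
}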
2753
2754/**
2755 * sata_phy_resume - resume SATA phy
2756 * @ap: ATA port to resume SATA phy for
 2757 * @params: timing parameters { interval, duration, timeout } in msec
2758 *
2759 * Resume SATA phy of @ap and debounce it.
2760 *
2761 * LOCKING:
2762 * Kernel thread context (may sleep)
2763 *
2764 * RETURNS:
2765 * 0 on success, -errno on failure.
2766 */
2767int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2768{
2769 u32 scontrol;
81952c54
TH
2770 int rc;
2771
2772 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2773 return rc;
7a7921e8 2774
852ee16a 2775 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2776
2777 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2778 return rc;
7a7921e8 2779
d7bb4cc7
TH
2780 /* Some PHYs react badly if SStatus is pounded immediately
2781 * after resuming. Delay 200ms before debouncing.
2782 */
2783 msleep(200);
7a7921e8 2784
d7bb4cc7 2785 return sata_phy_debounce(ap, params);
7a7921e8
TH
2786}
2787
f5914a46
TH
2788static void ata_wait_spinup(struct ata_port *ap)
2789{
2790 struct ata_eh_context *ehc = &ap->eh_context;
2791 unsigned long end, secs;
2792 int rc;
2793
2794 /* first, debounce phy if SATA */
2795 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 2796 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
2797
2798 /* if debounced successfully and offline, no need to wait */
2799 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2800 return;
2801 }
2802
2803 /* okay, let's give the drive time to spin up */
2804 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2805 secs = ((end - jiffies) + HZ - 1) / HZ;
2806
2807 if (time_after(jiffies, end))
2808 return;
2809
2810 if (secs > 5)
2811 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2812 "(%lu secs)\n", secs);
2813
2814 schedule_timeout_uninterruptible(end - jiffies);
2815}
2816
2817/**
2818 * ata_std_prereset - prepare for reset
2819 * @ap: ATA port to be reset
2820 *
2821 * @ap is about to be reset. Initialize it.
2822 *
2823 * LOCKING:
2824 * Kernel thread context (may sleep)
2825 *
2826 * RETURNS:
2827 * 0 on success, -errno otherwise.
2828 */
2829int ata_std_prereset(struct ata_port *ap)
2830{
2831 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 2832 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
2833 int rc;
2834
28324304
TH
2835 /* handle link resume & hotplug spinup */
2836 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2837 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2838 ehc->i.action |= ATA_EH_HARDRESET;
2839
2840 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2841 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2842 ata_wait_spinup(ap);
f5914a46
TH
2843
2844 /* if we're about to do hardreset, nothing more to do */
2845 if (ehc->i.action & ATA_EH_HARDRESET)
2846 return 0;
2847
2848 /* if SATA, resume phy */
2849 if (ap->cbl == ATA_CBL_SATA) {
f5914a46
TH
2850 rc = sata_phy_resume(ap, timing);
2851 if (rc && rc != -EOPNOTSUPP) {
2852 /* phy resume failed */
2853 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2854 "link for reset (errno=%d)\n", rc);
2855 return rc;
2856 }
2857 }
2858
2859 /* Wait for !BSY if the controller can wait for the first D2H
2860 * Reg FIS and we don't know that no device is attached.
2861 */
2862 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2863 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2864
2865 return 0;
2866}
2867
c2bd5804
TH
2868/**
2869 * ata_std_softreset - reset host port via ATA SRST
2870 * @ap: port to reset
c2bd5804
TH
2871 * @classes: resulting classes of attached devices
2872 *
52783c5d 2873 * Reset host port using ATA SRST.
c2bd5804
TH
2874 *
2875 * LOCKING:
2876 * Kernel thread context (may sleep)
2877 *
2878 * RETURNS:
2879 * 0 on success, -errno otherwise.
2880 */
2bf2cb26 2881int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
2882{
2883 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2884 unsigned int devmask = 0, err_mask;
2885 u8 err;
2886
2887 DPRINTK("ENTER\n");
2888
81952c54 2889 if (ata_port_offline(ap)) {
3a39746a
TH
2890 classes[0] = ATA_DEV_NONE;
2891 goto out;
2892 }
2893
c2bd5804
TH
2894 /* determine if device 0/1 are present */
2895 if (ata_devchk(ap, 0))
2896 devmask |= (1 << 0);
2897 if (slave_possible && ata_devchk(ap, 1))
2898 devmask |= (1 << 1);
2899
c2bd5804
TH
2900 /* select device 0 again */
2901 ap->ops->dev_select(ap, 0);
2902
2903 /* issue bus reset */
2904 DPRINTK("about to softreset, devmask=%x\n", devmask);
2905 err_mask = ata_bus_softreset(ap, devmask);
2906 if (err_mask) {
f15a1daf
TH
2907 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2908 err_mask);
c2bd5804
TH
2909 return -EIO;
2910 }
2911
2912 /* determine by signature whether we have ATA or ATAPI devices */
2913 classes[0] = ata_dev_try_classify(ap, 0, &err);
2914 if (slave_possible && err != 0x81)
2915 classes[1] = ata_dev_try_classify(ap, 1, &err);
2916
3a39746a 2917 out:
c2bd5804
TH
2918 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2919 return 0;
2920}
2921
2922/**
b6103f6d 2923 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 2924 * @ap: port to reset
b6103f6d 2925 * @timing: timing parameters { interval, duration, timeout } in msec
c2bd5804
TH
2926 *
2927 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
2928 *
2929 * LOCKING:
2930 * Kernel thread context (may sleep)
2931 *
2932 * RETURNS:
2933 * 0 on success, -errno otherwise.
2934 */
b6103f6d 2935int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 2936{
852ee16a 2937 u32 scontrol;
81952c54 2938 int rc;
852ee16a 2939
c2bd5804
TH
2940 DPRINTK("ENTER\n");
2941
3c567b7d 2942 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
2943 /* SATA spec says nothing about how to reconfigure
2944 * spd. To be on the safe side, turn off phy during
2945 * reconfiguration. This works for at least ICH7 AHCI
2946 * and Sil3124.
2947 */
81952c54 2948 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 2949 goto out;
81952c54 2950
a34b6fc0 2951 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
2952
2953 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 2954 goto out;
1c3fae4d 2955
3c567b7d 2956 sata_set_spd(ap);
1c3fae4d
TH
2957 }
2958
2959 /* issue phy wake/reset */
81952c54 2960 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 2961 goto out;
81952c54 2962
852ee16a 2963 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
2964
2965 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 2966 goto out;
c2bd5804 2967
1c3fae4d 2968 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
2969 * 10.4.2 says at least 1 ms.
2970 */
2971 msleep(1);
2972
1c3fae4d 2973 /* bring phy back */
b6103f6d
TH
2974 rc = sata_phy_resume(ap, timing);
2975 out:
2976 DPRINTK("EXIT, rc=%d\n", rc);
2977 return rc;
2978}
2979
2980/**
2981 * sata_std_hardreset - reset host port via SATA phy reset
2982 * @ap: port to reset
2983 * @class: resulting class of attached device
2984 *
2985 * SATA phy-reset host port using DET bits of SControl register,
2986 * wait for !BSY and classify the attached device.
2987 *
2988 * LOCKING:
2989 * Kernel thread context (may sleep)
2990 *
2991 * RETURNS:
2992 * 0 on success, -errno otherwise.
2993 */
2994int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2995{
2996 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
2997 int rc;
2998
2999 DPRINTK("ENTER\n");
3000
3001 /* do hardreset */
3002 rc = sata_port_hardreset(ap, timing);
3003 if (rc) {
3004 ata_port_printk(ap, KERN_ERR,
3005 "COMRESET failed (errno=%d)\n", rc);
3006 return rc;
3007 }
c2bd5804 3008
c2bd5804 3009 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3010 if (ata_port_offline(ap)) {
c2bd5804
TH
3011 *class = ATA_DEV_NONE;
3012 DPRINTK("EXIT, link offline\n");
3013 return 0;
3014 }
3015
3016 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
3017 ata_port_printk(ap, KERN_ERR,
3018 "COMRESET failed (device not ready)\n");
c2bd5804
TH
3019 return -EIO;
3020 }
3021
3a39746a
TH
3022 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3023
c2bd5804
TH
3024 *class = ata_dev_try_classify(ap, 0, NULL);
3025
3026 DPRINTK("EXIT, class=%u\n", *class);
3027 return 0;
3028}
3029
3030/**
3031 * ata_std_postreset - standard postreset callback
3032 * @ap: the target ata_port
3033 * @classes: classes of attached devices
3034 *
3035 * This function is invoked after a successful reset. Note that
3036 * the device might have been reset more than once using
3037 * different reset methods before postreset is invoked.
c2bd5804 3038 *
c2bd5804
TH
3039 * LOCKING:
3040 * Kernel thread context (may sleep)
3041 */
3042void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3043{
dc2b3515
TH
3044 u32 serror;
3045
c2bd5804
TH
3046 DPRINTK("ENTER\n");
3047
c2bd5804 3048 /* print link status */
81952c54 3049 sata_print_link_status(ap);
c2bd5804 3050
dc2b3515
TH
3051 /* clear SError */
3052 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3053 sata_scr_write(ap, SCR_ERROR, serror);
3054
3a39746a 3055 /* re-enable interrupts */
83625006
AI
3056 if (!ap->ops->error_handler)
3057 ap->ops->irq_on(ap);
c2bd5804
TH
3058
3059 /* is double-select really necessary? */
3060 if (classes[0] != ATA_DEV_NONE)
3061 ap->ops->dev_select(ap, 1);
3062 if (classes[1] != ATA_DEV_NONE)
3063 ap->ops->dev_select(ap, 0);
3064
3a39746a
TH
3065 /* bail out if no device is present */
3066 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3067 DPRINTK("EXIT, no device\n");
3068 return;
3069 }
3070
3071 /* set up device control */
0d5ff566
TH
3072 if (ap->ioaddr.ctl_addr)
3073 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3074
3075 DPRINTK("EXIT\n");
3076}
3077
623a3128
TH
3078/**
3079 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3080 * @dev: device to compare against
3081 * @new_class: class of the new device
3082 * @new_id: IDENTIFY page of the new device
3083 *
3084 * Compare @new_class and @new_id against @dev and determine
3085 * whether @dev is the device indicated by @new_class and
3086 * @new_id.
3087 *
3088 * LOCKING:
3089 * None.
3090 *
3091 * RETURNS:
3092 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3093 */
3373efd8
TH
3094static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3095 const u16 *new_id)
623a3128
TH
3096{
3097 const u16 *old_id = dev->id;
a0cf733b
TH
3098 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3099 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3100 u64 new_n_sectors;
3101
3102 if (dev->class != new_class) {
f15a1daf
TH
3103 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3104 dev->class, new_class);
623a3128
TH
3105 return 0;
3106 }
3107
a0cf733b
TH
3108 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3109 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3110 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3111 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3112 new_n_sectors = ata_id_n_sectors(new_id);
3113
3114 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3115 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3116 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3117 return 0;
3118 }
3119
3120 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3121 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3122 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3123 return 0;
3124 }
3125
3126 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3127 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3128 "%llu != %llu\n",
3129 (unsigned long long)dev->n_sectors,
3130 (unsigned long long)new_n_sectors);
623a3128
TH
3131 return 0;
3132 }
3133
3134 return 1;
3135}
3136
3137/**
3138 * ata_dev_revalidate - Revalidate ATA device
623a3128 3139 * @dev: device to revalidate
bff04647 3140 * @readid_flags: read ID flags
623a3128
TH
3141 *
3142 * Re-read IDENTIFY page and make sure @dev is still attached to
3143 * the port.
3144 *
3145 * LOCKING:
3146 * Kernel thread context (may sleep)
3147 *
3148 * RETURNS:
3149 * 0 on success, negative errno otherwise
3150 */
bff04647 3151int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3152{
5eb45c02 3153 unsigned int class = dev->class;
f15a1daf 3154 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3155 int rc;
3156
5eb45c02
TH
3157 if (!ata_dev_enabled(dev)) {
3158 rc = -ENODEV;
3159 goto fail;
3160 }
623a3128 3161
fe635c7e 3162 /* read ID data */
bff04647 3163 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3164 if (rc)
3165 goto fail;
3166
3167 /* is the device still there? */
3373efd8 3168 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3169 rc = -ENODEV;
3170 goto fail;
3171 }
3172
fe635c7e 3173 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3174
3175 /* configure device according to the new ID */
efdaedc4 3176 rc = ata_dev_configure(dev);
5eb45c02
TH
3177 if (rc == 0)
3178 return 0;
623a3128
TH
3179
3180 fail:
f15a1daf 3181 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3182 return rc;
3183}
3184
6919a0a6
AC
3185struct ata_blacklist_entry {
3186 const char *model_num;
3187 const char *model_rev;
3188 unsigned long horkage;
3189};
3190
3191static const struct ata_blacklist_entry ata_device_blacklist [] = {
3192 /* Devices with DMA related problems under Linux */
3193 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3194 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3195 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3196 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3197 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3198 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3199 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3200 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3201 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3202 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3203 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3204 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3205 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3206 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3207 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3208 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3209 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3210 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3211 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3212 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3213 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3214 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3215 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3216 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3217 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3218 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3219 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3220 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3221 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3222 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3223
3224 /* Devices we expect to fail diagnostics */
3225
3226 /* Devices where NCQ should be avoided */
3227 /* NCQ is slow */
3228 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3229
3230 /* Devices with NCQ limits */
3231
3232 /* End Marker */
3233 { }
1da177e4 3234};
2e9edbf8 3235
6919a0a6 3236unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3237{
8bfa79fc
TH
3238 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3239 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3240 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3241
8bfa79fc
TH
3242 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3243 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3244
6919a0a6 3245 while (ad->model_num) {
8bfa79fc 3246 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3247 if (ad->model_rev == NULL)
3248 return ad->horkage;
8bfa79fc 3249 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3250 return ad->horkage;
f4b15fef 3251 }
6919a0a6 3252 ad++;
f4b15fef 3253 }
1da177e4
LT
3254 return 0;
3255}
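/* Illustrative aside, not part of libata: the blacklist walk above is
 * a sentinel-terminated linear scan where a NULL model_rev matches any
 * firmware revision. Self-contained sketch with invented entries:
 */
#include <stdio.h>
#include <string.h>

struct entry {
	const char *model;
	const char *rev;
	unsigned long flags;
};

static const struct entry table[] = {
	{ "FAKE DRIVE A", NULL,   0x1 },	/* any revision matches */
	{ "FAKE DRIVE B", "1.23", 0x2 },	/* one specific revision */
	{ NULL, NULL, 0 }			/* sentinel ends the scan */
};

static unsigned long lookup(const char *model, const char *rev)
{
	const struct entry *e;

	for (e = table; e->model; e++)
		if (!strcmp(e->model, model) &&
		    (!e->rev || !strcmp(e->rev, rev)))
			return e->flags;
	return 0;
}

int main(void)
{
	printf("A/9.99 -> %lx\n", lookup("FAKE DRIVE A", "9.99")); /* 1 */
	printf("B/9.99 -> %lx\n", lookup("FAKE DRIVE B", "9.99")); /* 0 */
	return 0;
}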
3256
6919a0a6
AC
3257static int ata_dma_blacklisted(const struct ata_device *dev)
3258{
3259 /* We don't support polling DMA.
3260 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3261 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3262 */
3263 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3264 (dev->flags & ATA_DFLAG_CDB_INTR))
3265 return 1;
3266 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3267}
3268
a6d5a51c
TH
3269/**
3270 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3271 * @dev: Device to compute xfermask for
3272 *
acf356b1
TH
3273 * Compute supported xfermask of @dev and store it in
3274 * dev->*_mask. This function is responsible for applying all
3275 * known limits including host controller limits, device
3276 * blacklist, etc...
a6d5a51c
TH
3277 *
3278 * LOCKING:
3279 * None.
a6d5a51c 3280 */
3373efd8 3281static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3282{
3373efd8 3283 struct ata_port *ap = dev->ap;
cca3974e 3284 struct ata_host *host = ap->host;
a6d5a51c 3285 unsigned long xfer_mask;
1da177e4 3286
37deecb5 3287 /* controller modes available */
565083e1
TH
3288 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3289 ap->mwdma_mask, ap->udma_mask);
3290
3291 /* Apply cable rule here. Don't apply it early because when
3292 * we handle hot plug the cable type can itself change.
3293 */
3294 if (ap->cbl == ATA_CBL_PATA40)
3295 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
fc085150
AC
3296 /* Apply drive side cable rule. Unknown or 80 pin cables reported
3297 * host side are checked drive side as well. Cases where we know a
 3298 * 40-wire cable is used safely for 80 are not checked here.
3299 */
3300 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3301 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3302
1da177e4 3303
37deecb5
TH
3304 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3305 dev->mwdma_mask, dev->udma_mask);
3306 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3307
b352e57d
AC
3308 /*
3309 * CFA Advanced TrueIDE timings are not allowed on a shared
3310 * cable
3311 */
3312 if (ata_dev_pair(dev)) {
3313 /* No PIO5 or PIO6 */
3314 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3315 /* No MWDMA3 or MWDMA 4 */
3316 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3317 }
3318
37deecb5
TH
3319 if (ata_dma_blacklisted(dev)) {
3320 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3321 ata_dev_printk(dev, KERN_WARNING,
3322 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3323 }
a6d5a51c 3324
cca3974e 3325 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
37deecb5
TH
3326 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3327 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3328 "other device, disabling DMA\n");
5444a6f4 3329 }
565083e1 3330
5444a6f4
AC
3331 if (ap->ops->mode_filter)
3332 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3333
565083e1
TH
3334 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3335 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3336}
3337
1da177e4
LT
3338/**
3339 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3340 * @dev: Device to which command will be sent
3341 *
780a87f7
JG
3342 * Issue SET FEATURES - XFER MODE command to device @dev
3343 * on port @ap.
3344 *
1da177e4 3345 * LOCKING:
0cba632b 3346 * PCI/etc. bus probe sem.
83206a29
TH
3347 *
3348 * RETURNS:
3349 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3350 */
3351
3373efd8 3352static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3353{
a0123703 3354 struct ata_taskfile tf;
83206a29 3355 unsigned int err_mask;
1da177e4
LT
3356
3357 /* set up set-features taskfile */
3358 DPRINTK("set features - xfer mode\n");
3359
3373efd8 3360 ata_tf_init(dev, &tf);
a0123703
TH
3361 tf.command = ATA_CMD_SET_FEATURES;
3362 tf.feature = SETFEATURES_XFER;
3363 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3364 tf.protocol = ATA_PROT_NODATA;
3365 tf.nsect = dev->xfer_mode;
1da177e4 3366
3373efd8 3367 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3368
83206a29
TH
3369 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3370 return err_mask;
1da177e4
LT
3371}
3372
8bf62ece
AL
3373/**
3374 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3375 * @dev: Device to which command will be sent
e2a7f77a
RD
3376 * @heads: Number of heads (taskfile parameter)
3377 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3378 *
3379 * LOCKING:
6aff8f1f
TH
3380 * Kernel thread context (may sleep)
3381 *
3382 * RETURNS:
3383 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3384 */
3373efd8
TH
3385static unsigned int ata_dev_init_params(struct ata_device *dev,
3386 u16 heads, u16 sectors)
8bf62ece 3387{
a0123703 3388 struct ata_taskfile tf;
6aff8f1f 3389 unsigned int err_mask;
8bf62ece
AL
3390
3391 /* Number of sectors per track 1-255. Number of heads 1-16 */
3392 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3393 return AC_ERR_INVALID;
8bf62ece
AL
3394
3395 /* set up init dev params taskfile */
3396 DPRINTK("init dev params \n");
3397
3373efd8 3398 ata_tf_init(dev, &tf);
a0123703
TH
3399 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3400 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3401 tf.protocol = ATA_PROT_NODATA;
3402 tf.nsect = sectors;
3403 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3404
3373efd8 3405 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3406
6aff8f1f
TH
3407 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3408 return err_mask;
8bf62ece
AL
3409}
3410
1da177e4 3411/**
0cba632b
JG
3412 * ata_sg_clean - Unmap DMA memory associated with command
3413 * @qc: Command containing DMA memory to be released
3414 *
3415 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3416 *
3417 * LOCKING:
cca3974e 3418 * spin_lock_irqsave(host lock)
1da177e4 3419 */
70e6ad0c 3420void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3421{
3422 struct ata_port *ap = qc->ap;
cedc9a47 3423 struct scatterlist *sg = qc->__sg;
1da177e4 3424 int dir = qc->dma_dir;
cedc9a47 3425 void *pad_buf = NULL;
1da177e4 3426
a4631474
TH
3427 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3428 WARN_ON(sg == NULL);
1da177e4
LT
3429
3430 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3431 WARN_ON(qc->n_elem > 1);
1da177e4 3432
2c13b7ce 3433 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3434
cedc9a47
JG
 3435 /* if we padded the buffer out to a 32-bit boundary, and data
3436 * xfer direction is from-device, we must copy from the
3437 * pad buffer back into the supplied buffer
3438 */
3439 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3440 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3441
3442 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3443 if (qc->n_elem)
2f1f610b 3444 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3445 /* restore last sg */
3446 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3447 if (pad_buf) {
3448 struct scatterlist *psg = &qc->pad_sgent;
3449 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3450 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3451 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3452 }
3453 } else {
2e242fa9 3454 if (qc->n_elem)
2f1f610b 3455 dma_unmap_single(ap->dev,
e1410f2d
JG
3456 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3457 dir);
cedc9a47
JG
3458 /* restore sg */
3459 sg->length += qc->pad_len;
3460 if (pad_buf)
3461 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3462 pad_buf, qc->pad_len);
3463 }
1da177e4
LT
3464
3465 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 3466 qc->__sg = NULL;
1da177e4
LT
3467}
3468
3469/**
3470 * ata_fill_sg - Fill PCI IDE PRD table
3471 * @qc: Metadata associated with taskfile to be transferred
3472 *
780a87f7
JG
3473 * Fill PCI IDE PRD (scatter-gather) table with segments
3474 * associated with the current disk command.
3475 *
1da177e4 3476 * LOCKING:
cca3974e 3477 * spin_lock_irqsave(host lock)
1da177e4
LT
3478 *
3479 */
3480static void ata_fill_sg(struct ata_queued_cmd *qc)
3481{
1da177e4 3482 struct ata_port *ap = qc->ap;
cedc9a47
JG
3483 struct scatterlist *sg;
3484 unsigned int idx;
1da177e4 3485
a4631474 3486 WARN_ON(qc->__sg == NULL);
f131883e 3487 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3488
3489 idx = 0;
cedc9a47 3490 ata_for_each_sg(sg, qc) {
1da177e4
LT
3491 u32 addr, offset;
3492 u32 sg_len, len;
3493
3494 /* determine if physical DMA addr spans 64K boundary.
3495 * Note h/w doesn't support 64-bit, so we unconditionally
3496 * truncate dma_addr_t to u32.
3497 */
3498 addr = (u32) sg_dma_address(sg);
3499 sg_len = sg_dma_len(sg);
3500
3501 while (sg_len) {
3502 offset = addr & 0xffff;
3503 len = sg_len;
3504 if ((offset + sg_len) > 0x10000)
3505 len = 0x10000 - offset;
3506
3507 ap->prd[idx].addr = cpu_to_le32(addr);
3508 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3509 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3510
3511 idx++;
3512 sg_len -= len;
3513 addr += len;
3514 }
3515 }
3516
3517 if (idx)
3518 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3519}
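/* Illustrative aside, not part of libata: the PRD fill loop above must
 * split any segment that crosses a 64KiB boundary, since one PRD entry
 * cannot describe such a span. The split arithmetic, standalone and
 * with a made-up segment:
 */
#include <stdio.h>

int main(void)
{
	unsigned int addr = 0x1f000, sg_len = 0x3000;

	while (sg_len) {
		unsigned int offset = addr & 0xffff;
		unsigned int len = sg_len;

		if (offset + sg_len > 0x10000)	/* crosses 64K boundary */
			len = 0x10000 - offset;

		printf("PRD: addr=0x%05x len=0x%04x\n", addr, len);
		addr += len;
		sg_len -= len;
	}
	return 0;
}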
3520/**
3521 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3522 * @qc: Metadata associated with taskfile to check
3523 *
780a87f7
JG
3524 * Allow low-level driver to filter ATA PACKET commands, returning
3525 * a status indicating whether or not it is OK to use DMA for the
3526 * supplied PACKET command.
3527 *
1da177e4 3528 * LOCKING:
cca3974e 3529 * spin_lock_irqsave(host lock)
0cba632b 3530 *
1da177e4
LT
3531 * RETURNS: 0 when ATAPI DMA can be used
3532 * nonzero otherwise
3533 */
3534int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3535{
3536 struct ata_port *ap = qc->ap;
3537 int rc = 0; /* Assume ATAPI DMA is OK by default */
3538
3539 if (ap->ops->check_atapi_dma)
3540 rc = ap->ops->check_atapi_dma(qc);
3541
3542 return rc;
3543}
3544/**
3545 * ata_qc_prep - Prepare taskfile for submission
3546 * @qc: Metadata associated with taskfile to be prepared
3547 *
780a87f7
JG
3548 * Prepare ATA taskfile for submission.
3549 *
1da177e4 3550 * LOCKING:
cca3974e 3551 * spin_lock_irqsave(host lock)
1da177e4
LT
3552 */
3553void ata_qc_prep(struct ata_queued_cmd *qc)
3554{
3555 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3556 return;
3557
3558 ata_fill_sg(qc);
3559}
3560
e46834cd
BK
3561void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3562
0cba632b
JG
3563/**
3564 * ata_sg_init_one - Associate command with memory buffer
3565 * @qc: Command to be associated
3566 * @buf: Memory buffer
3567 * @buflen: Length of memory buffer, in bytes.
3568 *
3569 * Initialize the data-related elements of queued_cmd @qc
3570 * to point to a single memory buffer, @buf of byte length @buflen.
3571 *
3572 * LOCKING:
cca3974e 3573 * spin_lock_irqsave(host lock)
0cba632b
JG
3574 */
3575
1da177e4
LT
3576void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3577{
1da177e4
LT
3578 qc->flags |= ATA_QCFLAG_SINGLE;
3579
cedc9a47 3580 qc->__sg = &qc->sgent;
1da177e4 3581 qc->n_elem = 1;
cedc9a47 3582 qc->orig_n_elem = 1;
1da177e4 3583 qc->buf_virt = buf;
233277ca 3584 qc->nbytes = buflen;
1da177e4 3585
61c0596c 3586 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3587}
3588
0cba632b
JG
3589/**
3590 * ata_sg_init - Associate command with scatter-gather table.
3591 * @qc: Command to be associated
3592 * @sg: Scatter-gather table.
3593 * @n_elem: Number of elements in s/g table.
3594 *
3595 * Initialize the data-related elements of queued_cmd @qc
3596 * to point to a scatter-gather table @sg, containing @n_elem
3597 * elements.
3598 *
3599 * LOCKING:
cca3974e 3600 * spin_lock_irqsave(host lock)
0cba632b
JG
3601 */
3602
1da177e4
LT
3603void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3604 unsigned int n_elem)
3605{
3606 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3607 qc->__sg = sg;
1da177e4 3608 qc->n_elem = n_elem;
cedc9a47 3609 qc->orig_n_elem = n_elem;
1da177e4
LT
3610}
3611
3612/**
0cba632b
JG
3613 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3614 * @qc: Command with memory buffer to be mapped.
3615 *
3616 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
3617 *
3618 * LOCKING:
cca3974e 3619 * spin_lock_irqsave(host lock)
1da177e4
LT
3620 *
3621 * RETURNS:
0cba632b 3622 * Zero on success, negative on error.
1da177e4
LT
3623 */
3624
3625static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3626{
3627 struct ata_port *ap = qc->ap;
3628 int dir = qc->dma_dir;
cedc9a47 3629 struct scatterlist *sg = qc->__sg;
1da177e4 3630 dma_addr_t dma_address;
2e242fa9 3631 int trim_sg = 0;
1da177e4 3632
cedc9a47
JG
3633 /* we must lengthen transfers to end on a 32-bit boundary */
3634 qc->pad_len = sg->length & 3;
3635 if (qc->pad_len) {
3636 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3637 struct scatterlist *psg = &qc->pad_sgent;
3638
a4631474 3639 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3640
3641 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3642
3643 if (qc->tf.flags & ATA_TFLAG_WRITE)
3644 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3645 qc->pad_len);
3646
3647 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3648 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3649 /* trim sg */
3650 sg->length -= qc->pad_len;
2e242fa9
TH
3651 if (sg->length == 0)
3652 trim_sg = 1;
cedc9a47
JG
3653
3654 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3655 sg->length, qc->pad_len);
3656 }
3657
2e242fa9
TH
3658 if (trim_sg) {
3659 qc->n_elem--;
e1410f2d
JG
3660 goto skip_map;
3661 }
3662
2f1f610b 3663 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 3664 sg->length, dir);
537a95d9
TH
3665 if (dma_mapping_error(dma_address)) {
3666 /* restore sg */
3667 sg->length += qc->pad_len;
1da177e4 3668 return -1;
537a95d9 3669 }
1da177e4
LT
3670
3671 sg_dma_address(sg) = dma_address;
32529e01 3672 sg_dma_len(sg) = sg->length;
1da177e4 3673
2e242fa9 3674skip_map:
1da177e4
LT
3675 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3676 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3677
3678 return 0;
3679}
3680
3681/**
0cba632b
JG
3682 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3683 * @qc: Command with scatter-gather table to be mapped.
3684 *
3685 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
3686 *
3687 * LOCKING:
cca3974e 3688 * spin_lock_irqsave(host lock)
1da177e4
LT
3689 *
3690 * RETURNS:
0cba632b 3691 * Zero on success, negative on error.
1da177e4
LT
3692 *
3693 */
3694
3695static int ata_sg_setup(struct ata_queued_cmd *qc)
3696{
3697 struct ata_port *ap = qc->ap;
cedc9a47
JG
3698 struct scatterlist *sg = qc->__sg;
3699 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 3700 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4
LT
3701
3702 VPRINTK("ENTER, ata%u\n", ap->id);
a4631474 3703 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 3704
cedc9a47
JG
3705 /* we must lengthen transfers to end on a 32-bit boundary */
3706 qc->pad_len = lsg->length & 3;
3707 if (qc->pad_len) {
3708 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3709 struct scatterlist *psg = &qc->pad_sgent;
3710 unsigned int offset;
3711
a4631474 3712 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3713
3714 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3715
3716 /*
3717 * psg->page/offset are used to copy to-be-written
3718 * data in this function or read data in ata_sg_clean.
3719 */
3720 offset = lsg->offset + lsg->length - qc->pad_len;
3721 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3722 psg->offset = offset_in_page(offset);
3723
3724 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3725 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3726 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 3727 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3728 }
3729
3730 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3731 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3732 /* trim last sg */
3733 lsg->length -= qc->pad_len;
e1410f2d
JG
3734 if (lsg->length == 0)
3735 trim_sg = 1;
cedc9a47
JG
3736
3737 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3738 qc->n_elem - 1, lsg->length, qc->pad_len);
3739 }
3740
e1410f2d
JG
3741 pre_n_elem = qc->n_elem;
3742 if (trim_sg && pre_n_elem)
3743 pre_n_elem--;
3744
3745 if (!pre_n_elem) {
3746 n_elem = 0;
3747 goto skip_map;
3748 }
3749
1da177e4 3750 dir = qc->dma_dir;
2f1f610b 3751 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
3752 if (n_elem < 1) {
3753 /* restore last sg */
3754 lsg->length += qc->pad_len;
1da177e4 3755 return -1;
537a95d9 3756 }
1da177e4
LT
3757
3758 DPRINTK("%d sg elements mapped\n", n_elem);
3759
e1410f2d 3760skip_map:
1da177e4
LT
3761 qc->n_elem = n_elem;
3762
3763 return 0;
3764}
3765
0baab86b 3766/**
c893a3ae 3767 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3768 * @buf: Buffer to swap
3769 * @buf_words: Number of 16-bit words in buffer.
3770 *
3771 * Swap halves of 16-bit words if needed to convert from
3772 * little-endian byte order to native cpu byte order, or
3773 * vice-versa.
3774 *
3775 * LOCKING:
6f0ef4fa 3776 * Inherited from caller.
0baab86b 3777 */
1da177e4
LT
3778void swap_buf_le16(u16 *buf, unsigned int buf_words)
3779{
3780#ifdef __BIG_ENDIAN
3781 unsigned int i;
3782
3783 for (i = 0; i < buf_words; i++)
3784 buf[i] = le16_to_cpu(buf[i]);
3785#endif /* __BIG_ENDIAN */
3786}
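/*
 * Usage sketch: IDENTIFY DEVICE data arrives as 256 little-endian
 * 16-bit words, so callers pass it through this helper before
 * interpreting id[] on big-endian machines.  Illustrative only.
 */
static inline void example_fixup_identify(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);	/* ATA_ID_WORDS == 256 */
}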
3787
6ae4cfb5 3788/**
0d5ff566 3789 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 3790 * @adev: device to target
6ae4cfb5
AL
3791 * @buf: data buffer
3792 * @buflen: buffer length
344babaa 3793 * @write_data: read/write
6ae4cfb5
AL
3794 *
3795 * Transfer data from/to the device data register by PIO.
3796 *
3797 * LOCKING:
3798 * Inherited from caller.
6ae4cfb5 3799 */
0d5ff566
TH
3800void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3801 unsigned int buflen, int write_data)
1da177e4 3802{
a6b2c5d4 3803 struct ata_port *ap = adev->ap;
6ae4cfb5 3804 unsigned int words = buflen >> 1;
1da177e4 3805
6ae4cfb5 3806 /* Transfer multiple of 2 bytes */
1da177e4 3807 if (write_data)
0d5ff566 3808 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 3809 else
0d5ff566 3810 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
3811
3812 /* Transfer trailing 1 byte, if any. */
3813 if (unlikely(buflen & 0x01)) {
3814 u16 align_buf[1] = { 0 };
3815 unsigned char *trailing_buf = buf + buflen - 1;
3816
3817 if (write_data) {
3818 memcpy(align_buf, trailing_buf, 1);
0d5ff566 3819 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 3820 } else {
0d5ff566 3821 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
3822 memcpy(trailing_buf, align_buf, 1);
3823 }
3824 }
1da177e4
LT
3825}
3826
75e99585 3827/**
0d5ff566 3828 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
3829 * @adev: device to target
3830 * @buf: data buffer
3831 * @buflen: buffer length
3832 * @write_data: read/write
3833 *
88574551 3834 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
3835 * transfer with interrupts disabled.
3836 *
3837 * LOCKING:
3838 * Inherited from caller.
3839 */
0d5ff566
TH
3840void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3841 unsigned int buflen, int write_data)
75e99585
AC
3842{
3843 unsigned long flags;
3844 local_irq_save(flags);
0d5ff566 3845 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
3846 local_irq_restore(flags);
3847}
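/*
 * Sketch: the two helpers above are normally plugged into an LLD's
 * ata_port_operations rather than called directly.  A hypothetical
 * ops fragment (all other members omitted):
 */
static const struct ata_port_operations example_pio_port_ops = {
	.data_xfer	= ata_data_xfer_noirq,	/* or ata_data_xfer when
						 * IRQs during PIO are OK */
};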
3848
3849
6ae4cfb5
AL
3850/**
3851 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3852 * @qc: Command on going
3853 *
3854 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3855 *
3856 * LOCKING:
3857 * Inherited from caller.
3858 */
3859
1da177e4
LT
3860static void ata_pio_sector(struct ata_queued_cmd *qc)
3861{
3862 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 3863 struct scatterlist *sg = qc->__sg;
1da177e4
LT
3864 struct ata_port *ap = qc->ap;
3865 struct page *page;
3866 unsigned int offset;
3867 unsigned char *buf;
3868
726f0785 3869 if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
14be71f4 3870 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
3871
3872 page = sg[qc->cursg].page;
726f0785 3873 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
3874
3875 /* get the current page and offset */
3876 page = nth_page(page, (offset >> PAGE_SHIFT));
3877 offset %= PAGE_SIZE;
3878
1da177e4
LT
3879 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3880
91b8b313
AL
3881 if (PageHighMem(page)) {
3882 unsigned long flags;
3883
a6b2c5d4 3884 /* FIXME: use a bounce buffer */
91b8b313
AL
3885 local_irq_save(flags);
3886 buf = kmap_atomic(page, KM_IRQ0);
083958d3 3887
91b8b313 3888 /* do the actual data transfer */
a6b2c5d4 3889 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
1da177e4 3890
91b8b313
AL
3891 kunmap_atomic(buf, KM_IRQ0);
3892 local_irq_restore(flags);
3893 } else {
3894 buf = page_address(page);
a6b2c5d4 3895 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
91b8b313 3896 }
1da177e4 3897
726f0785
TH
3898 qc->curbytes += ATA_SECT_SIZE;
3899 qc->cursg_ofs += ATA_SECT_SIZE;
1da177e4 3900
726f0785 3901 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
3902 qc->cursg++;
3903 qc->cursg_ofs = 0;
3904 }
1da177e4 3905}
1da177e4 3906
07f6f7d0
AL
3907/**
3908 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3909 * @qc: Command on going
3910 *
c81e29b4 3911 * Transfer one or more ATA_SECT_SIZE-byte sectors of data from/to the
07f6f7d0
AL
3912 * ATA device for the DRQ request.
3913 *
3914 * LOCKING:
3915 * Inherited from caller.
3916 */
1da177e4 3917
07f6f7d0
AL
3918static void ata_pio_sectors(struct ata_queued_cmd *qc)
3919{
3920 if (is_multi_taskfile(&qc->tf)) {
3921 /* READ/WRITE MULTIPLE */
3922 unsigned int nsect;
3923
587005de 3924 WARN_ON(qc->dev->multi_count == 0);
1da177e4 3925
726f0785
TH
3926 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
3927 qc->dev->multi_count);
07f6f7d0
AL
3928 while (nsect--)
3929 ata_pio_sector(qc);
3930 } else
3931 ata_pio_sector(qc);
3932}
3933
c71c1857
AL
3934/**
3935 * atapi_send_cdb - Write CDB bytes to hardware
3936 * @ap: Port to which ATAPI device is attached.
3937 * @qc: Taskfile currently active
3938 *
3939 * When the device has indicated its readiness to accept
3940 * a CDB, this function is called. Send the CDB.
3941 *
3942 * LOCKING:
3943 * caller.
3944 */
3945
3946static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3947{
3948 /* send SCSI cdb */
3949 DPRINTK("send cdb\n");
db024d53 3950 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 3951
a6b2c5d4 3952 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
3953 ata_altstatus(ap); /* flush */
3954
3955 switch (qc->tf.protocol) {
3956 case ATA_PROT_ATAPI:
3957 ap->hsm_task_state = HSM_ST;
3958 break;
3959 case ATA_PROT_ATAPI_NODATA:
3960 ap->hsm_task_state = HSM_ST_LAST;
3961 break;
3962 case ATA_PROT_ATAPI_DMA:
3963 ap->hsm_task_state = HSM_ST_LAST;
3964 /* initiate bmdma */
3965 ap->ops->bmdma_start(qc);
3966 break;
3967 }
1da177e4
LT
3968}
3969
6ae4cfb5
AL
3970/**
3971 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3972 * @qc: Command on going
3973 * @bytes: number of bytes
3974 *
3975 * Transfer data from/to the ATAPI device.
3976 *
3977 * LOCKING:
3978 * Inherited from caller.
3979 *
3980 */
3981
1da177e4
LT
3982static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3983{
3984 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 3985 struct scatterlist *sg = qc->__sg;
1da177e4
LT
3986 struct ata_port *ap = qc->ap;
3987 struct page *page;
3988 unsigned char *buf;
3989 unsigned int offset, count;
3990
563a6e1f 3991 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 3992 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
3993
3994next_sg:
563a6e1f 3995 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 3996 /*
563a6e1f
AL
3997 * The end of qc->sg is reached and the device expects
3998 * more data to transfer. In order not to overrun qc->sg
3999 * and to fulfill the length specified in the byte count register,
4000 * - for the read case, discard trailing data from the device
4001 * - for the write case, pad with zero data to the device
4002 */
4003 u16 pad_buf[1] = { 0 };
4004 unsigned int words = bytes >> 1;
4005 unsigned int i;
4006
4007 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4008 ata_dev_printk(qc->dev, KERN_WARNING,
4009 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4010
4011 for (i = 0; i < words; i++)
a6b2c5d4 4012 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4013
14be71f4 4014 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4015 return;
4016 }
4017
cedc9a47 4018 sg = &qc->__sg[qc->cursg];
1da177e4 4019
1da177e4
LT
4020 page = sg->page;
4021 offset = sg->offset + qc->cursg_ofs;
4022
4023 /* get the current page and offset */
4024 page = nth_page(page, (offset >> PAGE_SHIFT));
4025 offset %= PAGE_SIZE;
4026
6952df03 4027 /* don't overrun current sg */
32529e01 4028 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4029
4030 /* don't cross page boundaries */
4031 count = min(count, (unsigned int)PAGE_SIZE - offset);
4032
7282aa4b
AL
4033 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4034
91b8b313
AL
4035 if (PageHighMem(page)) {
4036 unsigned long flags;
4037
a6b2c5d4 4038 /* FIXME: use bounce buffer */
91b8b313
AL
4039 local_irq_save(flags);
4040 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4041
91b8b313 4042 /* do the actual data transfer */
a6b2c5d4 4043 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4044
91b8b313
AL
4045 kunmap_atomic(buf, KM_IRQ0);
4046 local_irq_restore(flags);
4047 } else {
4048 buf = page_address(page);
a6b2c5d4 4049 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4050 }
1da177e4
LT
4051
4052 bytes -= count;
4053 qc->curbytes += count;
4054 qc->cursg_ofs += count;
4055
32529e01 4056 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4057 qc->cursg++;
4058 qc->cursg_ofs = 0;
4059 }
4060
563a6e1f 4061 if (bytes)
1da177e4 4062 goto next_sg;
1da177e4
LT
4063}
4064
6ae4cfb5
AL
4065/**
4066 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4067 * @qc: Command on going
4068 *
4069 * Transfer data from/to the ATAPI device.
4070 *
4071 * LOCKING:
4072 * Inherited from caller.
6ae4cfb5
AL
4073 */
4074
1da177e4
LT
4075static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4076{
4077 struct ata_port *ap = qc->ap;
4078 struct ata_device *dev = qc->dev;
4079 unsigned int ireason, bc_lo, bc_hi, bytes;
4080 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4081
eec4c3f3
AL
4082 /* Abuse qc->result_tf for temp storage of intermediate TF
4083 * here to save some kernel stack usage.
4084 * For normal completion, qc->result_tf is not relevant. For
4085 * error, qc->result_tf is later overwritten by ata_qc_complete().
4086 * So, the correctness of qc->result_tf is not affected.
4087 */
4088 ap->ops->tf_read(ap, &qc->result_tf);
4089 ireason = qc->result_tf.nsect;
4090 bc_lo = qc->result_tf.lbam;
4091 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4092 bytes = (bc_hi << 8) | bc_lo;
4093
4094 /* shall be cleared to zero, indicating xfer of data */
4095 if (ireason & (1 << 0))
4096 goto err_out;
4097
4098 /* make sure transfer direction matches expected */
4099 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4100 if (do_write != i_write)
4101 goto err_out;
4102
312f7da2
AL
4103 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
4104
1da177e4
LT
4105 __atapi_pio_bytes(qc, bytes);
4106
4107 return;
4108
4109err_out:
f15a1daf 4110 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4111 qc->err_mask |= AC_ERR_HSM;
14be71f4 4112 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4113}
4114
4115/**
c234fb00
AL
4116 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4117 * @ap: the target ata_port
4118 * @qc: qc on going
1da177e4 4119 *
c234fb00
AL
4120 * RETURNS:
4121 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4122 */
c234fb00
AL
4123
4124static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4125{
c234fb00
AL
4126 if (qc->tf.flags & ATA_TFLAG_POLLING)
4127 return 1;
1da177e4 4128
c234fb00
AL
4129 if (ap->hsm_task_state == HSM_ST_FIRST) {
4130 if (qc->tf.protocol == ATA_PROT_PIO &&
4131 (qc->tf.flags & ATA_TFLAG_WRITE))
4132 return 1;
1da177e4 4133
c234fb00
AL
4134 if (is_atapi_taskfile(&qc->tf) &&
4135 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4136 return 1;
fe79e683
AL
4137 }
4138
c234fb00
AL
4139 return 0;
4140}
1da177e4 4141
c17ea20d
TH
4142/**
4143 * ata_hsm_qc_complete - finish a qc running on standard HSM
4144 * @qc: Command to complete
4145 * @in_wq: 1 if called from workqueue, 0 otherwise
4146 *
4147 * Finish @qc which is running on standard HSM.
4148 *
4149 * LOCKING:
cca3974e 4150 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4151 * Otherwise, none on entry and grabs host lock.
4152 */
4153static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4154{
4155 struct ata_port *ap = qc->ap;
4156 unsigned long flags;
4157
4158 if (ap->ops->error_handler) {
4159 if (in_wq) {
ba6a1308 4160 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4161
cca3974e
JG
4162 /* EH might have kicked in while host lock is
4163 * released.
c17ea20d
TH
4164 */
4165 qc = ata_qc_from_tag(ap, qc->tag);
4166 if (qc) {
4167 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4168 ap->ops->irq_on(ap);
c17ea20d
TH
4169 ata_qc_complete(qc);
4170 } else
4171 ata_port_freeze(ap);
4172 }
4173
ba6a1308 4174 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4175 } else {
4176 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4177 ata_qc_complete(qc);
4178 else
4179 ata_port_freeze(ap);
4180 }
4181 } else {
4182 if (in_wq) {
ba6a1308 4183 spin_lock_irqsave(ap->lock, flags);
83625006 4184 ap->ops->irq_on(ap);
c17ea20d 4185 ata_qc_complete(qc);
ba6a1308 4186 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4187 } else
4188 ata_qc_complete(qc);
4189 }
1da177e4 4190
c81e29b4 4191 ata_altstatus(ap); /* flush */
c17ea20d
TH
4192}
4193
bb5cb290
AL
4194/**
4195 * ata_hsm_move - move the HSM to the next state.
4196 * @ap: the target ata_port
4197 * @qc: qc on going
4198 * @status: current device status
4199 * @in_wq: 1 if called from workqueue, 0 otherwise
4200 *
4201 * RETURNS:
4202 * 1 if another status poll is needed, 0 otherwise.
4203 */
9a1004d0
TH
4204int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4205 u8 status, int in_wq)
e2cec771 4206{
bb5cb290
AL
4207 unsigned long flags = 0;
4208 int poll_next;
4209
6912ccd5
AL
4210 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4211
bb5cb290
AL
4212 /* Make sure ata_qc_issue_prot() does not throw things
4213 * like DMA polling into the workqueue. Notice that
4214 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4215 */
c234fb00 4216 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4217
e2cec771 4218fsm_start:
999bb6f4
AL
4219 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4220 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4221
e2cec771
AL
4222 switch (ap->hsm_task_state) {
4223 case HSM_ST_FIRST:
bb5cb290
AL
4224 /* Send first data block or PACKET CDB */
4225
4226 /* If polling, we will stay in the work queue after
4227 * sending the data. Otherwise, interrupt handler
4228 * takes over after sending the data.
4229 */
4230 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4231
e2cec771 4232 /* check device status */
3655d1d3
AL
4233 if (unlikely((status & ATA_DRQ) == 0)) {
4234 /* handle BSY=0, DRQ=0 as error */
4235 if (likely(status & (ATA_ERR | ATA_DF)))
4236 /* device stops HSM for abort/error */
4237 qc->err_mask |= AC_ERR_DEV;
4238 else
4239 /* HSM violation. Let EH handle this */
4240 qc->err_mask |= AC_ERR_HSM;
4241
14be71f4 4242 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4243 goto fsm_start;
1da177e4
LT
4244 }
4245
71601958
AL
4246 /* Device should not ask for data transfer (DRQ=1)
4247 * when it finds something wrong.
eee6c32f
AL
4248 * We ignore DRQ here and stop the HSM by
4249 * changing hsm_task_state to HSM_ST_ERR and
4250 * let the EH abort the command or reset the device.
71601958
AL
4251 */
4252 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4253 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4254 ap->id, status);
3655d1d3 4255 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4256 ap->hsm_task_state = HSM_ST_ERR;
4257 goto fsm_start;
71601958 4258 }
1da177e4 4259
bb5cb290
AL
4260 /* Send the CDB (atapi) or the first data block (ata pio out).
4261 * During the state transition, interrupt handler shouldn't
4262 * be invoked before the data transfer is complete and
4263 * hsm_task_state is changed. Hence, the following locking.
4264 */
4265 if (in_wq)
ba6a1308 4266 spin_lock_irqsave(ap->lock, flags);
1da177e4 4267
bb5cb290
AL
4268 if (qc->tf.protocol == ATA_PROT_PIO) {
4269 /* PIO data out protocol.
4270 * send first data block.
4271 */
0565c26d 4272
bb5cb290
AL
4273 /* ata_pio_sectors() might change the state
4274 * to HSM_ST_LAST. so, the state is changed here
4275 * before ata_pio_sectors().
4276 */
4277 ap->hsm_task_state = HSM_ST;
4278 ata_pio_sectors(qc);
4279 ata_altstatus(ap); /* flush */
4280 } else
4281 /* send CDB */
4282 atapi_send_cdb(ap, qc);
4283
4284 if (in_wq)
ba6a1308 4285 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4286
4287 /* if polling, ata_pio_task() handles the rest.
4288 * otherwise, interrupt handler takes over from here.
4289 */
e2cec771 4290 break;
1c848984 4291
e2cec771
AL
4292 case HSM_ST:
4293 /* complete command or read/write the data register */
4294 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4295 /* ATAPI PIO protocol */
4296 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4297 /* No more data to transfer or device error.
4298 * Device error will be tagged in HSM_ST_LAST.
4299 */
e2cec771
AL
4300 ap->hsm_task_state = HSM_ST_LAST;
4301 goto fsm_start;
4302 }
1da177e4 4303
71601958
AL
4304 /* Device should not ask for data transfer (DRQ=1)
4305 * when it finds something wrong.
eee6c32f
AL
4306 * We ignore DRQ here and stop the HSM by
4307 * changing hsm_task_state to HSM_ST_ERR and
4308 * let the EH abort the command or reset the device.
71601958
AL
4309 */
4310 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4311 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4312 ap->id, status);
3655d1d3 4313 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4314 ap->hsm_task_state = HSM_ST_ERR;
4315 goto fsm_start;
71601958 4316 }
1da177e4 4317
e2cec771 4318 atapi_pio_bytes(qc);
7fb6ec28 4319
e2cec771
AL
4320 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4321 /* bad ireason reported by device */
4322 goto fsm_start;
1da177e4 4323
e2cec771
AL
4324 } else {
4325 /* ATA PIO protocol */
4326 if (unlikely((status & ATA_DRQ) == 0)) {
4327 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4328 if (likely(status & (ATA_ERR | ATA_DF)))
4329 /* device stops HSM for abort/error */
4330 qc->err_mask |= AC_ERR_DEV;
4331 else
55a8e2c8
TH
4332 /* HSM violation. Let EH handle this.
4333 * Phantom devices also trigger this
4334 * condition. Mark hint.
4335 */
4336 qc->err_mask |= AC_ERR_HSM |
4337 AC_ERR_NODEV_HINT;
3655d1d3 4338
e2cec771
AL
4339 ap->hsm_task_state = HSM_ST_ERR;
4340 goto fsm_start;
4341 }
1da177e4 4342
eee6c32f
AL
4343 /* For PIO reads, some devices may ask for
4344 * data transfer (DRQ=1) alone with ERR=1.
4345 * We respect DRQ here and transfer one
4346 * block of junk data before changing the
4347 * hsm_task_state to HSM_ST_ERR.
4348 *
4349 * For PIO writes, ERR=1 DRQ=1 doesn't make
4350 * sense since the data block has been
4351 * transferred to the device.
71601958
AL
4352 */
4353 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4354 /* data might be corrupted */
4355 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4356
4357 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4358 ata_pio_sectors(qc);
4359 ata_altstatus(ap);
4360 status = ata_wait_idle(ap);
4361 }
4362
3655d1d3
AL
4363 if (status & (ATA_BUSY | ATA_DRQ))
4364 qc->err_mask |= AC_ERR_HSM;
4365
eee6c32f
AL
4366 /* ata_pio_sectors() might change the
4367 * state to HSM_ST_LAST. so, the state
4368 * is changed after ata_pio_sectors().
4369 */
4370 ap->hsm_task_state = HSM_ST_ERR;
4371 goto fsm_start;
71601958
AL
4372 }
4373
e2cec771
AL
4374 ata_pio_sectors(qc);
4375
4376 if (ap->hsm_task_state == HSM_ST_LAST &&
4377 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4378 /* all data read */
4379 ata_altstatus(ap);
52a32205 4380 status = ata_wait_idle(ap);
e2cec771
AL
4381 goto fsm_start;
4382 }
4383 }
4384
4385 ata_altstatus(ap); /* flush */
bb5cb290 4386 poll_next = 1;
1da177e4
LT
4387 break;
4388
14be71f4 4389 case HSM_ST_LAST:
6912ccd5
AL
4390 if (unlikely(!ata_ok(status))) {
4391 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4392 ap->hsm_task_state = HSM_ST_ERR;
4393 goto fsm_start;
4394 }
4395
4396 /* no more data to transfer */
4332a771
AL
4397 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4398 ap->id, qc->dev->devno, status);
e2cec771 4399
6912ccd5
AL
4400 WARN_ON(qc->err_mask);
4401
e2cec771 4402 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4403
e2cec771 4404 /* complete taskfile transaction */
c17ea20d 4405 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4406
4407 poll_next = 0;
1da177e4
LT
4408 break;
4409
14be71f4 4410 case HSM_ST_ERR:
e2cec771
AL
4411 /* make sure qc->err_mask is available to
4412 * know what's wrong and recover
4413 */
4414 WARN_ON(qc->err_mask == 0);
4415
4416 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4417
999bb6f4 4418 /* complete taskfile transaction */
c17ea20d 4419 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4420
4421 poll_next = 0;
e2cec771
AL
4422 break;
4423 default:
bb5cb290 4424 poll_next = 0;
6912ccd5 4425 BUG();
1da177e4
LT
4426 }
4427
bb5cb290 4428 return poll_next;
1da177e4
LT
4429}
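/*
 * Sketch: an LLD with its own interrupt path can still drive the HSM
 * by feeding it a freshly read status byte (illustrative, minimal
 * error handling; assumes a qc is active and the lock is held).
 */
static void example_lld_irq_hsm(struct ata_port *ap,
				struct ata_queued_cmd *qc)
{
	u8 status = ata_chk_status(ap);		/* reading Status clears INTRQ */

	ata_hsm_move(ap, qc, status, 0);	/* in_wq == 0: irq context */
}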
4430
65f27f38 4431static void ata_pio_task(struct work_struct *work)
8061f5f0 4432{
65f27f38
DH
4433 struct ata_port *ap =
4434 container_of(work, struct ata_port, port_task.work);
4435 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4436 u8 status;
a1af3734 4437 int poll_next;
8061f5f0 4438
7fb6ec28 4439fsm_start:
a1af3734 4440 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4441
a1af3734
AL
4442 /*
4443 * This is purely heuristic. This is a fast path.
4444 * Sometimes when we enter, BSY will be cleared in
4445 * a chk-status or two. If not, the drive is probably seeking
4446 * or something. Snooze for a couple msecs, then
4447 * chk-status again. If still busy, queue delayed work.
4448 */
4449 status = ata_busy_wait(ap, ATA_BUSY, 5);
4450 if (status & ATA_BUSY) {
4451 msleep(2);
4452 status = ata_busy_wait(ap, ATA_BUSY, 10);
4453 if (status & ATA_BUSY) {
31ce6dae 4454 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4455 return;
4456 }
8061f5f0
TH
4457 }
4458
a1af3734
AL
4459 /* move the HSM */
4460 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4461
a1af3734
AL
4462 /* another command or interrupt handler
4463 * may be running at this point.
4464 */
4465 if (poll_next)
7fb6ec28 4466 goto fsm_start;
8061f5f0
TH
4467}
4468
1da177e4
LT
4469/**
4470 * ata_qc_new - Request an available ATA command, for queueing
4471 * @ap: Port associated with device @dev
4472 * @dev: Device from whom we request an available command structure
4473 *
4474 * LOCKING:
0cba632b 4475 * None.
1da177e4
LT
4476 */
4477
4478static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4479{
4480 struct ata_queued_cmd *qc = NULL;
4481 unsigned int i;
4482
e3180499 4483 /* no command while frozen */
b51e9e5d 4484 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4485 return NULL;
4486
2ab7db1f
TH
4487 /* the last tag is reserved for internal command. */
4488 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4489 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4490 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4491 break;
4492 }
4493
4494 if (qc)
4495 qc->tag = i;
4496
4497 return qc;
4498}
4499
4500/**
4501 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4502 * @dev: Device from whom we request an available command structure
4503 *
4504 * LOCKING:
0cba632b 4505 * None.
1da177e4
LT
4506 */
4507
3373efd8 4508struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4509{
3373efd8 4510 struct ata_port *ap = dev->ap;
1da177e4
LT
4511 struct ata_queued_cmd *qc;
4512
4513 qc = ata_qc_new(ap);
4514 if (qc) {
1da177e4
LT
4515 qc->scsicmd = NULL;
4516 qc->ap = ap;
4517 qc->dev = dev;
1da177e4 4518
2c13b7ce 4519 ata_qc_reinit(qc);
1da177e4
LT
4520 }
4521
4522 return qc;
4523}
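/*
 * Lifecycle sketch (hypothetical, error handling elided, caller holds
 * ap->lock): allocate a qc, fill in the taskfile, then hand it to
 * ata_qc_issue().  The example_done completion is illustrative; real
 * callers install something like ata_scsi_qc_complete() here.
 */
static void example_done(struct ata_queued_cmd *qc)
{
	ata_qc_free(qc);			/* release the tag */
}

static void example_issue_nodata(struct ata_device *dev, u8 command)
{
	struct ata_queued_cmd *qc = ata_qc_new_init(dev);

	if (!qc)
		return;		/* port frozen or all tags in use */

	qc->tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	qc->tf.command = command;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->complete_fn = example_done;

	ata_qc_issue(qc);
}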
4524
1da177e4
LT
4525/**
4526 * ata_qc_free - free unused ata_queued_cmd
4527 * @qc: Command to complete
4528 *
4529 * Designed to free unused ata_queued_cmd object
4530 * in case something prevents using it.
4531 *
4532 * LOCKING:
cca3974e 4533 * spin_lock_irqsave(host lock)
1da177e4
LT
4534 */
4535void ata_qc_free(struct ata_queued_cmd *qc)
4536{
4ba946e9
TH
4537 struct ata_port *ap = qc->ap;
4538 unsigned int tag;
4539
a4631474 4540 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4541
4ba946e9
TH
4542 qc->flags = 0;
4543 tag = qc->tag;
4544 if (likely(ata_tag_valid(tag))) {
4ba946e9 4545 qc->tag = ATA_TAG_POISON;
6cec4a39 4546 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4547 }
1da177e4
LT
4548}
4549
76014427 4550void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4551{
dedaf2b0
TH
4552 struct ata_port *ap = qc->ap;
4553
a4631474
TH
4554 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4555 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4556
4557 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4558 ata_sg_clean(qc);
4559
7401abf2 4560 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4561 if (qc->tf.protocol == ATA_PROT_NCQ)
4562 ap->sactive &= ~(1 << qc->tag);
4563 else
4564 ap->active_tag = ATA_TAG_POISON;
7401abf2 4565
3f3791d3
AL
4566 /* atapi: mark qc as inactive to prevent the interrupt handler
4567 * from completing the command twice later, before the error handler
4568 * is called. (when rc != 0 and atapi request sense is needed)
4569 */
4570 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4571 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4572
1da177e4 4573 /* call completion callback */
77853bf2 4574 qc->complete_fn(qc);
1da177e4
LT
4575}
4576
39599a53
TH
4577static void fill_result_tf(struct ata_queued_cmd *qc)
4578{
4579 struct ata_port *ap = qc->ap;
4580
4581 ap->ops->tf_read(ap, &qc->result_tf);
4582 qc->result_tf.flags = qc->tf.flags;
4583}
4584
f686bcb8
TH
4585/**
4586 * ata_qc_complete - Complete an active ATA command
4587 * @qc: Command to complete
4589 *
4590 * Indicate to the mid and upper layers that an ATA
4591 * command has completed, with either an ok or not-ok status.
4592 *
4593 * LOCKING:
cca3974e 4594 * spin_lock_irqsave(host lock)
f686bcb8
TH
4595 */
4596void ata_qc_complete(struct ata_queued_cmd *qc)
4597{
4598 struct ata_port *ap = qc->ap;
4599
4600 /* XXX: New EH and old EH use different mechanisms to
4601 * synchronize EH with regular execution path.
4602 *
4603 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4604 * Normal execution path is responsible for not accessing a
4605 * failed qc. libata core enforces the rule by returning NULL
4606 * from ata_qc_from_tag() for failed qcs.
4607 *
4608 * Old EH depends on ata_qc_complete() nullifying completion
4609 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4610 * not synchronize with interrupt handler. Only PIO task is
4611 * taken care of.
4612 */
4613 if (ap->ops->error_handler) {
b51e9e5d 4614 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4615
4616 if (unlikely(qc->err_mask))
4617 qc->flags |= ATA_QCFLAG_FAILED;
4618
4619 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4620 if (!ata_tag_internal(qc->tag)) {
4621 /* always fill result TF for failed qc */
39599a53 4622 fill_result_tf(qc);
f686bcb8
TH
4623 ata_qc_schedule_eh(qc);
4624 return;
4625 }
4626 }
4627
4628 /* read result TF if requested */
4629 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4630 fill_result_tf(qc);
f686bcb8
TH
4631
4632 __ata_qc_complete(qc);
4633 } else {
4634 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4635 return;
4636
4637 /* read result TF if failed or requested */
4638 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4639 fill_result_tf(qc);
f686bcb8
TH
4640
4641 __ata_qc_complete(qc);
4642 }
4643}
4644
dedaf2b0
TH
4645/**
4646 * ata_qc_complete_multiple - Complete multiple qcs successfully
4647 * @ap: port in question
4648 * @qc_active: new qc_active mask
4649 * @finish_qc: LLDD callback invoked before completing a qc
4650 *
4651 * Complete in-flight commands. This function is meant to be
4652 * called from a low-level driver's interrupt routine to complete
4653 * requests normally. ap->qc_active and @qc_active are compared
4654 * and commands are completed accordingly.
4655 *
4656 * LOCKING:
cca3974e 4657 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4658 *
4659 * RETURNS:
4660 * Number of completed commands on success, -errno otherwise.
4661 */
4662int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4663 void (*finish_qc)(struct ata_queued_cmd *))
4664{
4665 int nr_done = 0;
4666 u32 done_mask;
4667 int i;
4668
4669 done_mask = ap->qc_active ^ qc_active;
4670
4671 if (unlikely(done_mask & qc_active)) {
4672 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4673 "(%08x->%08x)\n", ap->qc_active, qc_active);
4674 return -EINVAL;
4675 }
4676
4677 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4678 struct ata_queued_cmd *qc;
4679
4680 if (!(done_mask & (1 << i)))
4681 continue;
4682
4683 if ((qc = ata_qc_from_tag(ap, i))) {
4684 if (finish_qc)
4685 finish_qc(qc);
4686 ata_qc_complete(qc);
4687 nr_done++;
4688 }
4689 }
4690
4691 return nr_done;
4692}
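/*
 * Sketch of an NCQ-aware LLD completion path built on the helper
 * above; example_read_active_tags() stands in for a hypothetical
 * controller register holding the still-active tag bitmap.
 */
static u32 example_read_active_tags(struct ata_port *ap);

static void example_ncq_interrupt(struct ata_port *ap)
{
	u32 qc_active = example_read_active_tags(ap);

	/* completes every qc whose bit cleared since ap->qc_active */
	ata_qc_complete_multiple(ap, qc_active, NULL);
}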
4693
1da177e4
LT
4694static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4695{
4696 struct ata_port *ap = qc->ap;
4697
4698 switch (qc->tf.protocol) {
3dc1d881 4699 case ATA_PROT_NCQ:
1da177e4
LT
4700 case ATA_PROT_DMA:
4701 case ATA_PROT_ATAPI_DMA:
4702 return 1;
4703
4704 case ATA_PROT_ATAPI:
4705 case ATA_PROT_PIO:
1da177e4
LT
4706 if (ap->flags & ATA_FLAG_PIO_DMA)
4707 return 1;
4708
4709 /* fall through */
4710
4711 default:
4712 return 0;
4713 }
4714
4715 /* never reached */
4716}
4717
4718/**
4719 * ata_qc_issue - issue taskfile to device
4720 * @qc: command to issue to device
4721 *
4722 * Prepare an ATA command for submission to the device.
4723 * This includes mapping the data into a DMA-able
4724 * area, filling in the S/G table, and finally
4725 * writing the taskfile to hardware, starting the command.
4726 *
4727 * LOCKING:
cca3974e 4728 * spin_lock_irqsave(host lock)
1da177e4 4729 */
8e0e694a 4730void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4731{
4732 struct ata_port *ap = qc->ap;
4733
dedaf2b0
TH
4734 /* Make sure only one non-NCQ command is outstanding. The
4735 * check is skipped for old EH because it reuses active qc to
4736 * request ATAPI sense.
4737 */
4738 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4739
4740 if (qc->tf.protocol == ATA_PROT_NCQ) {
4741 WARN_ON(ap->sactive & (1 << qc->tag));
4742 ap->sactive |= 1 << qc->tag;
4743 } else {
4744 WARN_ON(ap->sactive);
4745 ap->active_tag = qc->tag;
4746 }
4747
e4a70e76 4748 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4749 ap->qc_active |= 1 << qc->tag;
e4a70e76 4750
1da177e4
LT
4751 if (ata_should_dma_map(qc)) {
4752 if (qc->flags & ATA_QCFLAG_SG) {
4753 if (ata_sg_setup(qc))
8e436af9 4754 goto sg_err;
1da177e4
LT
4755 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4756 if (ata_sg_setup_one(qc))
8e436af9 4757 goto sg_err;
1da177e4
LT
4758 }
4759 } else {
4760 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4761 }
4762
4763 ap->ops->qc_prep(qc);
4764
8e0e694a
TH
4765 qc->err_mask |= ap->ops->qc_issue(qc);
4766 if (unlikely(qc->err_mask))
4767 goto err;
4768 return;
1da177e4 4769
8e436af9
TH
4770sg_err:
4771 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4772 qc->err_mask |= AC_ERR_SYSTEM;
4773err:
4774 ata_qc_complete(qc);
1da177e4
LT
4775}
4776
4777/**
4778 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4779 * @qc: command to issue to device
4780 *
4781 * Using various libata functions and hooks, this function
4782 * starts an ATA command. ATA commands are grouped into
4783 * classes called "protocols", and issuing each type of protocol
4784 * is slightly different.
4785 *
0baab86b
EF
4786 * May be used as the qc_issue() entry in ata_port_operations.
4787 *
1da177e4 4788 * LOCKING:
cca3974e 4789 * spin_lock_irqsave(host lock)
1da177e4
LT
4790 *
4791 * RETURNS:
9a3d9eb0 4792 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
4793 */
4794
9a3d9eb0 4795unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
4796{
4797 struct ata_port *ap = qc->ap;
4798
e50362ec
AL
4799 /* Use polling pio if the LLD doesn't handle
4800 * interrupt driven pio and atapi CDB interrupt.
4801 */
4802 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4803 switch (qc->tf.protocol) {
4804 case ATA_PROT_PIO:
e3472cbe 4805 case ATA_PROT_NODATA:
e50362ec
AL
4806 case ATA_PROT_ATAPI:
4807 case ATA_PROT_ATAPI_NODATA:
4808 qc->tf.flags |= ATA_TFLAG_POLLING;
4809 break;
4810 case ATA_PROT_ATAPI_DMA:
4811 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 4812 /* see ata_dma_blacklisted() */
e50362ec
AL
4813 BUG();
4814 break;
4815 default:
4816 break;
4817 }
4818 }
4819
3d3cca37
TH
4820 /* Some controllers show flaky interrupt behavior after
4821 * setting xfer mode. Use polling instead.
4822 */
4823 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4824 qc->tf.feature == SETFEATURES_XFER) &&
4825 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4826 qc->tf.flags |= ATA_TFLAG_POLLING;
4827
312f7da2 4828 /* select the device */
1da177e4
LT
4829 ata_dev_select(ap, qc->dev->devno, 1, 0);
4830
312f7da2 4831 /* start the command */
1da177e4
LT
4832 switch (qc->tf.protocol) {
4833 case ATA_PROT_NODATA:
312f7da2
AL
4834 if (qc->tf.flags & ATA_TFLAG_POLLING)
4835 ata_qc_set_polling(qc);
4836
e5338254 4837 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
4838 ap->hsm_task_state = HSM_ST_LAST;
4839
4840 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4841 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 4842
1da177e4
LT
4843 break;
4844
4845 case ATA_PROT_DMA:
587005de 4846 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4847
1da177e4
LT
4848 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4849 ap->ops->bmdma_setup(qc); /* set up bmdma */
4850 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 4851 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4852 break;
4853
312f7da2
AL
4854 case ATA_PROT_PIO:
4855 if (qc->tf.flags & ATA_TFLAG_POLLING)
4856 ata_qc_set_polling(qc);
1da177e4 4857
e5338254 4858 ata_tf_to_host(ap, &qc->tf);
312f7da2 4859
54f00389
AL
4860 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4861 /* PIO data out protocol */
4862 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 4863 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4864
4865 /* always send first data block using
e27486db 4866 * the ata_pio_task() codepath.
54f00389 4867 */
312f7da2 4868 } else {
54f00389
AL
4869 /* PIO data in protocol */
4870 ap->hsm_task_state = HSM_ST;
4871
4872 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4873 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4874
4875 /* if polling, ata_pio_task() handles the rest.
4876 * otherwise, interrupt handler takes over from here.
4877 */
312f7da2
AL
4878 }
4879
1da177e4
LT
4880 break;
4881
1da177e4 4882 case ATA_PROT_ATAPI:
1da177e4 4883 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
4884 if (qc->tf.flags & ATA_TFLAG_POLLING)
4885 ata_qc_set_polling(qc);
4886
e5338254 4887 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 4888
312f7da2
AL
4889 ap->hsm_task_state = HSM_ST_FIRST;
4890
4891 /* send cdb by polling if no cdb interrupt */
4892 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4893 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 4894 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4895 break;
4896
4897 case ATA_PROT_ATAPI_DMA:
587005de 4898 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4899
1da177e4
LT
4900 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4901 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
4902 ap->hsm_task_state = HSM_ST_FIRST;
4903
4904 /* send cdb by polling if no cdb interrupt */
4905 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 4906 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4907 break;
4908
4909 default:
4910 WARN_ON(1);
9a3d9eb0 4911 return AC_ERR_SYSTEM;
1da177e4
LT
4912 }
4913
4914 return 0;
4915}
4916
1da177e4
LT
4917/**
4918 * ata_host_intr - Handle host interrupt for given (port, task)
4919 * @ap: Port on which interrupt arrived (possibly...)
4920 * @qc: Taskfile currently active in engine
4921 *
4922 * Handle host interrupt for given queued command. Currently,
4923 * only DMA interrupts are handled. All other commands are
4924 * handled via polling with interrupts disabled (nIEN bit).
4925 *
4926 * LOCKING:
cca3974e 4927 * spin_lock_irqsave(host lock)
1da177e4
LT
4928 *
4929 * RETURNS:
4930 * One if interrupt was handled, zero if not (shared irq).
4931 */
4932
4933inline unsigned int ata_host_intr (struct ata_port *ap,
4934 struct ata_queued_cmd *qc)
4935{
ea54763f 4936 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 4937 u8 status, host_stat = 0;
1da177e4 4938
312f7da2
AL
4939 VPRINTK("ata%u: protocol %d task_state %d\n",
4940 ap->id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 4941
312f7da2
AL
4942 /* Check whether we are expecting interrupt in this state */
4943 switch (ap->hsm_task_state) {
4944 case HSM_ST_FIRST:
6912ccd5
AL
4945 /* Some pre-ATAPI-4 devices assert INTRQ
4946 * at this state when ready to receive CDB.
4947 */
1da177e4 4948
312f7da2
AL
4949 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4950 * The flag was turned on only for atapi devices.
4951 * No need to check is_atapi_taskfile(&qc->tf) again.
4952 */
4953 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 4954 goto idle_irq;
1da177e4 4955 break;
312f7da2
AL
4956 case HSM_ST_LAST:
4957 if (qc->tf.protocol == ATA_PROT_DMA ||
4958 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4959 /* check status of DMA engine */
4960 host_stat = ap->ops->bmdma_status(ap);
4961 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4962
4963 /* if it's not our irq... */
4964 if (!(host_stat & ATA_DMA_INTR))
4965 goto idle_irq;
4966
4967 /* before we do anything else, clear DMA-Start bit */
4968 ap->ops->bmdma_stop(qc);
a4f16610
AL
4969
4970 if (unlikely(host_stat & ATA_DMA_ERR)) {
4971 /* error when transferring data to/from memory */
4972 qc->err_mask |= AC_ERR_HOST_BUS;
4973 ap->hsm_task_state = HSM_ST_ERR;
4974 }
312f7da2
AL
4975 }
4976 break;
4977 case HSM_ST:
4978 break;
1da177e4
LT
4979 default:
4980 goto idle_irq;
4981 }
4982
312f7da2
AL
4983 /* check altstatus */
4984 status = ata_altstatus(ap);
4985 if (status & ATA_BUSY)
4986 goto idle_irq;
1da177e4 4987
312f7da2
AL
4988 /* check main status, clearing INTRQ */
4989 status = ata_chk_status(ap);
4990 if (unlikely(status & ATA_BUSY))
4991 goto idle_irq;
1da177e4 4992
312f7da2
AL
4993 /* ack bmdma irq events */
4994 ap->ops->irq_clear(ap);
1da177e4 4995
bb5cb290 4996 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
4997
4998 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
4999 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5000 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5001
1da177e4
LT
5002 return 1; /* irq handled */
5003
5004idle_irq:
5005 ap->stats.idle_irq++;
5006
5007#ifdef ATA_IRQ_TRAP
5008 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5009 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5010 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5011 return 1;
1da177e4
LT
5012 }
5013#endif
5014 return 0; /* irq not handled */
5015}
5016
5017/**
5018 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5019 * @irq: irq line (unused)
cca3974e 5020 * @dev_instance: pointer to our ata_host information structure
1da177e4 5021 *
0cba632b
JG
5022 * Default interrupt handler for PCI IDE devices. Calls
5023 * ata_host_intr() for each port that is not disabled.
5024 *
1da177e4 5025 * LOCKING:
cca3974e 5026 * Obtains host lock during operation.
1da177e4
LT
5027 *
5028 * RETURNS:
0cba632b 5029 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5030 */
5031
7d12e780 5032irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5033{
cca3974e 5034 struct ata_host *host = dev_instance;
1da177e4
LT
5035 unsigned int i;
5036 unsigned int handled = 0;
5037 unsigned long flags;
5038
5039 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5040 spin_lock_irqsave(&host->lock, flags);
1da177e4 5041
cca3974e 5042 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5043 struct ata_port *ap;
5044
cca3974e 5045 ap = host->ports[i];
c1389503 5046 if (ap &&
029f5468 5047 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5048 struct ata_queued_cmd *qc;
5049
5050 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5051 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5052 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5053 handled |= ata_host_intr(ap, qc);
5054 }
5055 }
5056
cca3974e 5057 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5058
5059 return IRQ_RETVAL(handled);
5060}
5061
34bf2170
TH
5062/**
5063 * sata_scr_valid - test whether SCRs are accessible
5064 * @ap: ATA port to test SCR accessibility for
5065 *
5066 * Test whether SCRs are accessible for @ap.
5067 *
5068 * LOCKING:
5069 * None.
5070 *
5071 * RETURNS:
5072 * 1 if SCRs are accessible, 0 otherwise.
5073 */
5074int sata_scr_valid(struct ata_port *ap)
5075{
5076 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5077}
5078
5079/**
5080 * sata_scr_read - read SCR register of the specified port
5081 * @ap: ATA port to read SCR for
5082 * @reg: SCR to read
5083 * @val: Place to store read value
5084 *
5085 * Read SCR register @reg of @ap into *@val. This function is
5086 * guaranteed to succeed if the cable type of the port is SATA
5087 * and the port implements ->scr_read.
5088 *
5089 * LOCKING:
5090 * None.
5091 *
5092 * RETURNS:
5093 * 0 on success, negative errno on failure.
5094 */
5095int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5096{
5097 if (sata_scr_valid(ap)) {
5098 *val = ap->ops->scr_read(ap, reg);
5099 return 0;
5100 }
5101 return -EOPNOTSUPP;
5102}
5103
5104/**
5105 * sata_scr_write - write SCR register of the specified port
5106 * @ap: ATA port to write SCR for
5107 * @reg: SCR to write
5108 * @val: value to write
5109 *
5110 * Write @val to SCR register @reg of @ap. This function is
5111 * guaranteed to succeed if the cable type of the port is SATA
5112 * and the port implements ->scr_read.
5113 *
5114 * LOCKING:
5115 * None.
5116 *
5117 * RETURNS:
5118 * 0 on success, negative errno on failure.
5119 */
5120int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5121{
5122 if (sata_scr_valid(ap)) {
5123 ap->ops->scr_write(ap, reg, val);
5124 return 0;
5125 }
5126 return -EOPNOTSUPP;
5127}
5128
5129/**
5130 * sata_scr_write_flush - write SCR register of the specified port and flush
5131 * @ap: ATA port to write SCR for
5132 * @reg: SCR to write
5133 * @val: value to write
5134 *
5135 * This function is identical to sata_scr_write() except that this
5136 * function performs flush after writing to the register.
5137 *
5138 * LOCKING:
5139 * None.
5140 *
5141 * RETURNS:
5142 * 0 on success, negative errno on failure.
5143 */
5144int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5145{
5146 if (sata_scr_valid(ap)) {
5147 ap->ops->scr_write(ap, reg, val);
5148 ap->ops->scr_read(ap, reg);
5149 return 0;
5150 }
5151 return -EOPNOTSUPP;
5152}
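/*
 * Usage sketch: decoding the negotiated link speed from SStatus with
 * the accessors above (hypothetical helper).
 */
static int example_link_spd(struct ata_port *ap)
{
	u32 sstatus;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return 0;		/* SCRs not accessible */

	return (sstatus >> 4) & 0xf;	/* SPD: 1 = 1.5 Gbps, 2 = 3.0 Gbps */
}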
5153
5154/**
5155 * ata_port_online - test whether the given port is online
5156 * @ap: ATA port to test
5157 *
5158 * Test whether @ap is online. Note that this function returns 0
5159 * if online status of @ap cannot be obtained, so
5160 * ata_port_online(ap) != !ata_port_offline(ap).
5161 *
5162 * LOCKING:
5163 * None.
5164 *
5165 * RETURNS:
5166 * 1 if the port online status is available and online.
5167 */
5168int ata_port_online(struct ata_port *ap)
5169{
5170 u32 sstatus;
5171
5172 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5173 return 1;
5174 return 0;
5175}
5176
5177/**
5178 * ata_port_offline - test whether the given port is offline
5179 * @ap: ATA port to test
5180 *
5181 * Test whether @ap is offline. Note that this function returns
5182 * 0 if offline status of @ap cannot be obtained, so
5183 * ata_port_online(ap) != !ata_port_offline(ap).
5184 *
5185 * LOCKING:
5186 * None.
5187 *
5188 * RETURNS:
5189 * 1 if the port offline status is available and offline.
5190 */
5191int ata_port_offline(struct ata_port *ap)
5192{
5193 u32 sstatus;
5194
5195 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5196 return 1;
5197 return 0;
5198}
0baab86b 5199
77b08fb5 5200int ata_flush_cache(struct ata_device *dev)
9b847548 5201{
977e6b9f 5202 unsigned int err_mask;
9b847548
JA
5203 u8 cmd;
5204
5205 if (!ata_try_flush_cache(dev))
5206 return 0;
5207
6fc49adb 5208 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5209 cmd = ATA_CMD_FLUSH_EXT;
5210 else
5211 cmd = ATA_CMD_FLUSH;
5212
977e6b9f
TH
5213 err_mask = ata_do_simple_cmd(dev, cmd);
5214 if (err_mask) {
5215 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5216 return -EIO;
5217 }
5218
5219 return 0;
9b847548
JA
5220}
5221
cca3974e
JG
5222static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5223 unsigned int action, unsigned int ehi_flags,
5224 int wait)
500530f6
TH
5225{
5226 unsigned long flags;
5227 int i, rc;
5228
cca3974e
JG
5229 for (i = 0; i < host->n_ports; i++) {
5230 struct ata_port *ap = host->ports[i];
500530f6
TH
5231
5232 /* Previous resume operation might still be in
5233 * progress. Wait for PM_PENDING to clear.
5234 */
5235 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5236 ata_port_wait_eh(ap);
5237 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5238 }
5239
5240 /* request PM ops to EH */
5241 spin_lock_irqsave(ap->lock, flags);
5242
5243 ap->pm_mesg = mesg;
5244 if (wait) {
5245 rc = 0;
5246 ap->pm_result = &rc;
5247 }
5248
5249 ap->pflags |= ATA_PFLAG_PM_PENDING;
5250 ap->eh_info.action |= action;
5251 ap->eh_info.flags |= ehi_flags;
5252
5253 ata_port_schedule_eh(ap);
5254
5255 spin_unlock_irqrestore(ap->lock, flags);
5256
5257 /* wait and check result */
5258 if (wait) {
5259 ata_port_wait_eh(ap);
5260 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5261 if (rc)
5262 return rc;
5263 }
5264 }
5265
5266 return 0;
5267}
5268
5269/**
cca3974e
JG
5270 * ata_host_suspend - suspend host
5271 * @host: host to suspend
500530f6
TH
5272 * @mesg: PM message
5273 *
cca3974e 5274 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5275 * function requests EH to perform PM operations and waits for EH
5276 * to finish.
5277 *
5278 * LOCKING:
5279 * Kernel thread context (may sleep).
5280 *
5281 * RETURNS:
5282 * 0 on success, -errno on failure.
5283 */
cca3974e 5284int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6
TH
5285{
5286 int i, j, rc;
5287
cca3974e 5288 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
500530f6
TH
5289 if (rc)
5290 goto fail;
5291
5292 /* EH is quiescent now. Fail if we have any ready device.
5293 * This happens if hotplug occurs between completion of device
5294 * suspension and here.
5295 */
cca3974e
JG
5296 for (i = 0; i < host->n_ports; i++) {
5297 struct ata_port *ap = host->ports[i];
500530f6
TH
5298
5299 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5300 struct ata_device *dev = &ap->device[j];
5301
5302 if (ata_dev_ready(dev)) {
5303 ata_port_printk(ap, KERN_WARNING,
5304 "suspend failed, device %d "
5305 "still active\n", dev->devno);
5306 rc = -EBUSY;
5307 goto fail;
5308 }
5309 }
5310 }
5311
cca3974e 5312 host->dev->power.power_state = mesg;
500530f6
TH
5313 return 0;
5314
5315 fail:
cca3974e 5316 ata_host_resume(host);
500530f6
TH
5317 return rc;
5318}
5319
5320/**
cca3974e
JG
5321 * ata_host_resume - resume host
5322 * @host: host to resume
500530f6 5323 *
cca3974e 5324 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5325 * function requests EH to perform PM operations and returns.
5326 * Note that all resume operations are performed in parallel.
5327 *
5328 * LOCKING:
5329 * Kernel thread context (may sleep).
5330 */
cca3974e 5331void ata_host_resume(struct ata_host *host)
500530f6 5332{
cca3974e
JG
5333 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5334 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5335 host->dev->power.power_state = PMSG_ON;
500530f6
TH
5336}
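/*
 * Sketch: a PCI LLD's power-management methods typically just forward
 * to the helpers above (hypothetical driver, simplified; compare
 * ata_pci_device_suspend()).
 */
static int example_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	return ata_host_suspend(host, mesg);
}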
5337
c893a3ae
RD
5338/**
5339 * ata_port_start - Set port up for dma.
5340 * @ap: Port to initialize
5341 *
5342 * Called just after data structures for each port are
5343 * initialized. Allocates space for PRD table.
5344 *
5345 * May be used as the port_start() entry in ata_port_operations.
5346 *
5347 * LOCKING:
5348 * Inherited from caller.
5349 */
f0d36efd 5350int ata_port_start(struct ata_port *ap)
1da177e4 5351{
2f1f610b 5352 struct device *dev = ap->dev;
6037d6bb 5353 int rc;
1da177e4 5354
f0d36efd
TH
5355 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5356 GFP_KERNEL);
1da177e4
LT
5357 if (!ap->prd)
5358 return -ENOMEM;
5359
6037d6bb 5360 rc = ata_pad_alloc(ap, dev);
f0d36efd 5361 if (rc)
6037d6bb 5362 return rc;
1da177e4 5363
f0d36efd
TH
5364 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5365 (unsigned long long)ap->prd_dma);
1da177e4
LT
5366 return 0;
5367}
5368
3ef3b43d
TH
5369/**
5370 * ata_dev_init - Initialize an ata_device structure
5371 * @dev: Device structure to initialize
5372 *
5373 * Initialize @dev in preparation for probing.
5374 *
5375 * LOCKING:
5376 * Inherited from caller.
5377 */
5378void ata_dev_init(struct ata_device *dev)
5379{
5380 struct ata_port *ap = dev->ap;
72fa4b74
TH
5381 unsigned long flags;
5382
5a04bf4b
TH
5383 /* SATA spd limit is bound to the first device */
5384 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5385
72fa4b74
TH
5386 /* High bits of dev->flags are used to record warm plug
5387 * requests which occur asynchronously. Synchronize using
cca3974e 5388 * host lock.
72fa4b74 5389 */
ba6a1308 5390 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5391 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5392 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5393
72fa4b74
TH
5394 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5395 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5396 dev->pio_mask = UINT_MAX;
5397 dev->mwdma_mask = UINT_MAX;
5398 dev->udma_mask = UINT_MAX;
5399}
5400
1da177e4 5401/**
155a8a9c 5402 * ata_port_init - Initialize an ata_port structure
1da177e4 5403 * @ap: Structure to initialize
cca3974e 5404 * @host: Collection of hosts to which @ap belongs
1da177e4
LT
5405 * @ent: Probe information provided by low-level driver
5406 * @port_no: Port number associated with this ata_port
5407 *
155a8a9c 5408 * Initialize a new ata_port structure.
0cba632b 5409 *
1da177e4 5410 * LOCKING:
0cba632b 5411 * Inherited from caller.
1da177e4 5412 */
cca3974e 5413void ata_port_init(struct ata_port *ap, struct ata_host *host,
155a8a9c 5414 const struct ata_probe_ent *ent, unsigned int port_no)
1da177e4
LT
5415{
5416 unsigned int i;
5417
cca3974e 5418 ap->lock = &host->lock;
198e0fed 5419 ap->flags = ATA_FLAG_DISABLED;
155a8a9c 5420 ap->id = ata_unique_id++;
1da177e4 5421 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5422 ap->host = host;
2f1f610b 5423 ap->dev = ent->dev;
1da177e4 5424 ap->port_no = port_no;
fea63e38
TH
5425 if (port_no == 1 && ent->pinfo2) {
5426 ap->pio_mask = ent->pinfo2->pio_mask;
5427 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5428 ap->udma_mask = ent->pinfo2->udma_mask;
5429 ap->flags |= ent->pinfo2->flags;
5430 ap->ops = ent->pinfo2->port_ops;
5431 } else {
5432 ap->pio_mask = ent->pio_mask;
5433 ap->mwdma_mask = ent->mwdma_mask;
5434 ap->udma_mask = ent->udma_mask;
5435 ap->flags |= ent->port_flags;
5436 ap->ops = ent->port_ops;
5437 }
5a04bf4b 5438 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4
LT
5439 ap->active_tag = ATA_TAG_POISON;
5440 ap->last_ctl = 0xFF;
bd5d825c
BP
5441
5442#if defined(ATA_VERBOSE_DEBUG)
5443 /* turn on all debugging levels */
5444 ap->msg_enable = 0x00FF;
5445#elif defined(ATA_DEBUG)
5446 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5447#else
0dd4b21f 5448 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5449#endif
1da177e4 5450
65f27f38
DH
5451 INIT_DELAYED_WORK(&ap->port_task, NULL);
5452 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5453 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5454 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5455 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 5456
838df628
TH
5457 /* set cable type */
5458 ap->cbl = ATA_CBL_NONE;
5459 if (ap->flags & ATA_FLAG_SATA)
5460 ap->cbl = ATA_CBL_SATA;
5461
acf356b1
TH
5462 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5463 struct ata_device *dev = &ap->device[i];
38d87234 5464 dev->ap = ap;
72fa4b74 5465 dev->devno = i;
3ef3b43d 5466 ata_dev_init(dev);
acf356b1 5467 }
1da177e4
LT
5468
5469#ifdef ATA_IRQ_TRAP
5470 ap->stats.unhandled_irq = 1;
5471 ap->stats.idle_irq = 1;
5472#endif
5473
5474 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5475}
5476
155a8a9c 5477/**
4608c160
TH
5478 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5479 * @ap: ATA port to initialize SCSI host for
5480 * @shost: SCSI host associated with @ap
155a8a9c 5481 *
4608c160 5482 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c
BK
5483 *
5484 * LOCKING:
5485 * Inherited from caller.
5486 */
4608c160 5487static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5488{
cca3974e 5489 ap->scsi_host = shost;
155a8a9c 5490
4608c160
TH
5491 shost->unique_id = ap->id;
5492 shost->max_id = 16;
5493 shost->max_lun = 1;
5494 shost->max_channel = 1;
5495 shost->max_cmd_len = 12;
155a8a9c
BK
5496}
5497
1da177e4 5498/**
996139f1 5499 * ata_port_add - Attach low-level ATA driver to system
1da177e4 5500 * @ent: Information provided by low-level driver
cca3974e 5501 * @host: Collection of ports to which we add
1da177e4
LT
 5502 * @port_no: Port number within @host
5503 *
0cba632b
JG
5504 * Attach low-level ATA driver to system.
5505 *
1da177e4 5506 * LOCKING:
0cba632b 5507 * PCI/etc. bus probe sem.
1da177e4
LT
5508 *
5509 * RETURNS:
0cba632b 5510 * New ata_port on success, NULL on error.
1da177e4 5511 */
996139f1 5512static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
cca3974e 5513 struct ata_host *host,
1da177e4
LT
5514 unsigned int port_no)
5515{
996139f1 5516 struct Scsi_Host *shost;
1da177e4 5517 struct ata_port *ap;
1da177e4
LT
5518
5519 DPRINTK("ENTER\n");
aec5c3c1 5520
52783c5d 5521 if (!ent->port_ops->error_handler &&
cca3974e 5522 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
aec5c3c1
TH
5523 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5524 port_no);
5525 return NULL;
5526 }
5527
996139f1
JG
5528 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5529 if (!shost)
1da177e4
LT
5530 return NULL;
5531
996139f1 5532 shost->transportt = &ata_scsi_transport_template;
30afc84c 5533
996139f1 5534 ap = ata_shost_to_port(shost);
1da177e4 5535
cca3974e 5536 ata_port_init(ap, host, ent, port_no);
996139f1 5537 ata_port_init_shost(ap, shost);
1da177e4 5538
1da177e4 5539 return ap;
1da177e4
LT
5540}
5541
f0d36efd
TH
5542static void ata_host_release(struct device *gendev, void *res)
5543{
5544 struct ata_host *host = dev_get_drvdata(gendev);
5545 int i;
5546
5547 for (i = 0; i < host->n_ports; i++) {
5548 struct ata_port *ap = host->ports[i];
5549
5550 if (!ap)
5551 continue;
5552
5553 if (ap->ops->port_stop)
5554 ap->ops->port_stop(ap);
5555
5556 scsi_host_put(ap->scsi_host);
5557 }
5558
5559 if (host->ops->host_stop)
5560 host->ops->host_stop(host);
5561}
5562
b03732f0 5563/**
cca3974e
JG
 5564 * ata_host_init - Initialize a host struct
5565 * @host: host to initialize
5566 * @dev: device host is attached to
5567 * @flags: host flags
5568 * @ops: port_ops
b03732f0
BK
5569 *
5570 * LOCKING:
5571 * PCI/etc. bus probe sem.
5572 *
5573 */
5574
cca3974e
JG
5575void ata_host_init(struct ata_host *host, struct device *dev,
5576 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 5577{
cca3974e
JG
5578 spin_lock_init(&host->lock);
5579 host->dev = dev;
5580 host->flags = flags;
5581 host->ops = ops;
b03732f0
BK
5582}
5583
1da177e4 5584/**
0cba632b
JG
5585 * ata_device_add - Register hardware device with ATA and SCSI layers
5586 * @ent: Probe information describing hardware device to be registered
5587 *
5588 * This function processes the information provided in the probe
5589 * information struct @ent, allocates the necessary ATA and SCSI
5590 * host information structures, initializes them, and registers
5591 * everything with requisite kernel subsystems.
5592 *
 5593 * This function requests IRQs, probes the ATA bus, and probes
5594 * the SCSI bus.
1da177e4
LT
5595 *
5596 * LOCKING:
0cba632b 5597 * PCI/etc. bus probe sem.
1da177e4
LT
5598 *
5599 * RETURNS:
0cba632b 5600 * Number of ports registered. Zero on error (no ports registered).
1da177e4 5601 */
057ace5e 5602int ata_device_add(const struct ata_probe_ent *ent)
1da177e4 5603{
6d0500df 5604 unsigned int i;
1da177e4 5605 struct device *dev = ent->dev;
cca3974e 5606 struct ata_host *host;
39b07ce6 5607 int rc;
1da177e4
LT
5608
5609 DPRINTK("ENTER\n");
f20b16ff 5610
02f076aa
AC
5611 if (ent->irq == 0) {
5612 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5613 return 0;
5614 }
f0d36efd
TH
5615
5616 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5617 return 0;
5618
1da177e4 5619 /* alloc a container for our list of ATA ports (buses) */
f0d36efd
TH
5620 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5621 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
cca3974e 5622 if (!host)
f0d36efd
TH
5623 goto err_out;
5624 devres_add(dev, host);
5625 dev_set_drvdata(dev, host);
1da177e4 5626
cca3974e
JG
5627 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5628 host->n_ports = ent->n_ports;
5629 host->irq = ent->irq;
5630 host->irq2 = ent->irq2;
0d5ff566 5631 host->iomap = ent->iomap;
cca3974e 5632 host->private_data = ent->private_data;
1da177e4
LT
5633
5634 /* register each port bound to this device */
cca3974e 5635 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5636 struct ata_port *ap;
5637 unsigned long xfer_mode_mask;
2ec7df04 5638 int irq_line = ent->irq;
1da177e4 5639
cca3974e 5640 ap = ata_port_add(ent, host, i);
c38778c3 5641 host->ports[i] = ap;
1da177e4
LT
5642 if (!ap)
5643 goto err_out;
5644
dd5b06c4
TH
5645 /* dummy? */
5646 if (ent->dummy_port_mask & (1 << i)) {
5647 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5648 ap->ops = &ata_dummy_port_ops;
5649 continue;
5650 }
5651
5652 /* start port */
5653 rc = ap->ops->port_start(ap);
5654 if (rc) {
cca3974e
JG
5655 host->ports[i] = NULL;
5656 scsi_host_put(ap->scsi_host);
dd5b06c4
TH
5657 goto err_out;
5658 }
5659
2ec7df04
AC
5660 /* Report the secondary IRQ for second channel legacy */
5661 if (i == 1 && ent->irq2)
5662 irq_line = ent->irq2;
5663
1da177e4
LT
 5664 	xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5665 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5666 (ap->pio_mask << ATA_SHIFT_PIO);
5667
5668 /* print per-port info to dmesg */
0d5ff566
TH
5669 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5670 "ctl 0x%p bmdma 0x%p irq %d\n",
f15a1daf
TH
5671 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5672 ata_mode_string(xfer_mode_mask),
5673 ap->ioaddr.cmd_addr,
5674 ap->ioaddr.ctl_addr,
5675 ap->ioaddr.bmdma_addr,
2ec7df04 5676 irq_line);
1da177e4 5677
0f0a3ad3
TH
5678 /* freeze port before requesting IRQ */
5679 ata_eh_freeze_port(ap);
1da177e4
LT
5680 }
5681
2ec7df04 5682 /* obtain irq, that may be shared between channels */
f0d36efd
TH
5683 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5684 ent->irq_flags, DRV_NAME, host);
39b07ce6
JG
5685 if (rc) {
5686 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5687 ent->irq, rc);
1da177e4 5688 goto err_out;
39b07ce6 5689 }
1da177e4 5690
2ec7df04
AC
 5691 	/* do we have a second IRQ for the other channel, e.g. legacy mode */
5692 if (ent->irq2) {
 5693 		/* We will get weird core code crashes later if this is true,
 5694 		   so trap it now */
5695 BUG_ON(ent->irq == ent->irq2);
5696
f0d36efd
TH
5697 rc = devm_request_irq(dev, ent->irq2,
5698 ent->port_ops->irq_handler, ent->irq_flags,
5699 DRV_NAME, host);
2ec7df04
AC
5700 if (rc) {
5701 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5702 ent->irq2, rc);
f0d36efd 5703 goto err_out;
2ec7df04
AC
5704 }
5705 }
5706
f0d36efd 5707 /* resource acquisition complete */
b878ca5d 5708 devres_remove_group(dev, ata_device_add);
f0d36efd 5709
1da177e4
LT
5710 /* perform each probe synchronously */
5711 DPRINTK("probe begin\n");
cca3974e
JG
5712 for (i = 0; i < host->n_ports; i++) {
5713 struct ata_port *ap = host->ports[i];
5a04bf4b 5714 u32 scontrol;
1da177e4
LT
5715 int rc;
5716
5a04bf4b
TH
5717 /* init sata_spd_limit to the current value */
5718 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5719 int spd = (scontrol >> 4) & 0xf;
5720 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5721 }
5722 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5723
cca3974e 5724 rc = scsi_add_host(ap->scsi_host, dev);
1da177e4 5725 if (rc) {
f15a1daf 5726 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
1da177e4
LT
5727 /* FIXME: do something useful here */
5728 /* FIXME: handle unconditional calls to
5729 * scsi_scan_host and ata_host_remove, below,
5730 * at the very least
5731 */
5732 }
3e706399 5733
52783c5d 5734 if (ap->ops->error_handler) {
1cdaf534 5735 struct ata_eh_info *ehi = &ap->eh_info;
3e706399
TH
5736 unsigned long flags;
5737
5738 ata_port_probe(ap);
5739
5740 /* kick EH for boot probing */
ba6a1308 5741 spin_lock_irqsave(ap->lock, flags);
3e706399 5742
1cdaf534
TH
5743 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5744 ehi->action |= ATA_EH_SOFTRESET;
5745 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
3e706399 5746
b51e9e5d 5747 ap->pflags |= ATA_PFLAG_LOADING;
3e706399
TH
5748 ata_port_schedule_eh(ap);
5749
ba6a1308 5750 spin_unlock_irqrestore(ap->lock, flags);
3e706399
TH
5751
5752 /* wait for EH to finish */
5753 ata_port_wait_eh(ap);
5754 } else {
5755 DPRINTK("ata%u: bus probe begin\n", ap->id);
5756 rc = ata_bus_probe(ap);
5757 DPRINTK("ata%u: bus probe end\n", ap->id);
5758
5759 if (rc) {
5760 /* FIXME: do something useful here?
5761 * Current libata behavior will
5762 * tear down everything when
5763 * the module is removed
5764 * or the h/w is unplugged.
5765 */
5766 }
5767 }
1da177e4
LT
5768 }
5769
5770 /* probes are done, now scan each port's disk(s) */
c893a3ae 5771 DPRINTK("host probe begin\n");
cca3974e
JG
5772 for (i = 0; i < host->n_ports; i++) {
5773 struct ata_port *ap = host->ports[i];
1da177e4 5774
644dd0cc 5775 ata_scsi_scan_host(ap);
1da177e4
LT
5776 }
5777
1da177e4
LT
5778 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5779 return ent->n_ports; /* success */
5780
f0d36efd
TH
5781 err_out:
5782 devres_release_group(dev, ata_device_add);
5783 dev_set_drvdata(dev, NULL);
5784 VPRINTK("EXIT, returning %d\n", rc);
1da177e4
LT
5785 return 0;
5786}
5787
720ba126
TH
5788/**
 5789 * ata_port_detach - Detach ATA port in preparation for device removal
5790 * @ap: ATA port to be detached
5791 *
5792 * Detach all ATA devices and the associated SCSI devices of @ap;
5793 * then, remove the associated SCSI host. @ap is guaranteed to
5794 * be quiescent on return from this function.
5795 *
5796 * LOCKING:
5797 * Kernel thread context (may sleep).
5798 */
5799void ata_port_detach(struct ata_port *ap)
5800{
5801 unsigned long flags;
5802 int i;
5803
5804 if (!ap->ops->error_handler)
c3cf30a9 5805 goto skip_eh;
720ba126
TH
5806
5807 /* tell EH we're leaving & flush EH */
ba6a1308 5808 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 5809 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 5810 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5811
5812 ata_port_wait_eh(ap);
5813
5814 /* EH is now guaranteed to see UNLOADING, so no new device
5815 * will be attached. Disable all existing devices.
5816 */
ba6a1308 5817 spin_lock_irqsave(ap->lock, flags);
720ba126
TH
5818
5819 for (i = 0; i < ATA_MAX_DEVICES; i++)
5820 ata_dev_disable(&ap->device[i]);
5821
ba6a1308 5822 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5823
5824 /* Final freeze & EH. All in-flight commands are aborted. EH
 5825 	 * will be skipped and retries will be terminated with bad
5826 * target.
5827 */
ba6a1308 5828 spin_lock_irqsave(ap->lock, flags);
720ba126 5829 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 5830 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5831
5832 ata_port_wait_eh(ap);
5833
5834 /* Flush hotplug task. The sequence is similar to
5835 * ata_port_flush_task().
5836 */
5837 flush_workqueue(ata_aux_wq);
5838 cancel_delayed_work(&ap->hotplug_task);
5839 flush_workqueue(ata_aux_wq);
5840
c3cf30a9 5841 skip_eh:
720ba126 5842 /* remove the associated SCSI host */
cca3974e 5843 scsi_remove_host(ap->scsi_host);
720ba126
TH
5844}
5845
0529c159
TH
5846/**
5847 * ata_host_detach - Detach all ports of an ATA host
5848 * @host: Host to detach
5849 *
5850 * Detach all ports of @host.
5851 *
5852 * LOCKING:
5853 * Kernel thread context (may sleep).
5854 */
5855void ata_host_detach(struct ata_host *host)
5856{
5857 int i;
5858
5859 for (i = 0; i < host->n_ports; i++)
5860 ata_port_detach(host->ports[i]);
5861}
5862
f6d950e2
BK
5863struct ata_probe_ent *
5864ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5865{
5866 struct ata_probe_ent *probe_ent;
5867
f0d36efd
TH
5868 /* XXX - the following if can go away once all LLDs are managed */
5869 if (!list_empty(&dev->devres_head))
5870 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
5871 else
5872 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
f6d950e2
BK
5873 if (!probe_ent) {
5874 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5875 kobject_name(&(dev->kobj)));
5876 return NULL;
5877 }
5878
5879 INIT_LIST_HEAD(&probe_ent->node);
5880 probe_ent->dev = dev;
5881
5882 probe_ent->sht = port->sht;
cca3974e 5883 probe_ent->port_flags = port->flags;
f6d950e2
BK
5884 probe_ent->pio_mask = port->pio_mask;
5885 probe_ent->mwdma_mask = port->mwdma_mask;
5886 probe_ent->udma_mask = port->udma_mask;
5887 probe_ent->port_ops = port->port_ops;
d639ca94 5888 probe_ent->private_data = port->private_data;
f6d950e2
BK
5889
5890 return probe_ent;
5891}
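
/*
 * A condensed sketch (not from the original source) of how a managed
 * LLD uses the probe entry: allocate it from an ata_port_info
 * template, fill in per-board resources, then hand it to
 * ata_device_add() above. The "example" names and the 0x206 control
 * offset are placeholders, and error unwinding is omitted.
 */
static int example_attach(struct device *dev,
			  const struct ata_port_info *pinfo,
			  void __iomem *cmd_base, int irq)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_probe_ent_alloc(dev, pinfo);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->n_ports = 1;
	probe_ent->irq = irq;
	probe_ent->port[0].cmd_addr = cmd_base;
	probe_ent->port[0].ctl_addr = cmd_base + 0x206;	/* hypothetical */
	probe_ent->port[0].altstatus_addr = probe_ent->port[0].ctl_addr;
	ata_std_ports(&probe_ent->port[0]);

	/* ata_device_add() returns the number of ports registered */
	if (ata_device_add(probe_ent) != 1)
		return -ENODEV;

	/* probe data is only needed during registration */
	devm_kfree(dev, probe_ent);
	return 0;
}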
5892
1da177e4
LT
5893/**
 5894 * ata_std_ports - Initialize ioaddr with standard port offsets.
5895 * @ioaddr: IO address structure to be initialized
0baab86b
EF
5896 *
5897 * Utility function which initializes data_addr, error_addr,
5898 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5899 * device_addr, status_addr, and command_addr to standard offsets
5900 * relative to cmd_addr.
5901 *
5902 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 5903 */
0baab86b 5904
1da177e4
LT
5905void ata_std_ports(struct ata_ioports *ioaddr)
5906{
5907 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5908 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5909 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5910 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5911 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5912 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5913 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5914 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5915 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5916 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5917}
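
/*
 * Division of labour in a sketch, assuming iomapped bases supplied by
 * the LLD: ata_std_ports() derives only the taskfile registers from
 * cmd_addr, so the control and BMDMA addresses remain the caller's
 * job. The 0x206 control offset is illustrative only.
 */
static void example_init_ioports(struct ata_ioports *ioaddr,
				 void __iomem *base,
				 void __iomem *bmdma_base)
{
	ioaddr->cmd_addr = base;
	ioaddr->ctl_addr = base + 0x206;		/* not set by the helper */
	ioaddr->altstatus_addr = ioaddr->ctl_addr;	/* not set by the helper */
	ioaddr->bmdma_addr = bmdma_base;		/* not set by the helper */
	ata_std_ports(ioaddr);	/* data_addr .. command_addr from cmd_addr */
}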
5918
0baab86b 5919
374b1873
JG
5920#ifdef CONFIG_PCI
5921
1da177e4
LT
5922/**
5923 * ata_pci_remove_one - PCI layer callback for device removal
5924 * @pdev: PCI device that was removed
5925 *
b878ca5d
TH
 5926 * PCI layer indicates to libata via this hook that a hot-unplug or
5927 * module unload event has occurred. Detach all ports. Resource
5928 * release is handled via devres.
1da177e4
LT
5929 *
5930 * LOCKING:
5931 * Inherited from PCI layer (may sleep).
5932 */
f0d36efd 5933void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
5934{
5935 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 5936 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 5937
b878ca5d 5938 ata_host_detach(host);
1da177e4
LT
5939}
5940
5941/* move to PCI subsystem */
057ace5e 5942int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5943{
5944 unsigned long tmp = 0;
5945
5946 switch (bits->width) {
5947 case 1: {
5948 u8 tmp8 = 0;
5949 pci_read_config_byte(pdev, bits->reg, &tmp8);
5950 tmp = tmp8;
5951 break;
5952 }
5953 case 2: {
5954 u16 tmp16 = 0;
5955 pci_read_config_word(pdev, bits->reg, &tmp16);
5956 tmp = tmp16;
5957 break;
5958 }
5959 case 4: {
5960 u32 tmp32 = 0;
5961 pci_read_config_dword(pdev, bits->reg, &tmp32);
5962 tmp = tmp32;
5963 break;
5964 }
5965
5966 default:
5967 return -EINVAL;
5968 }
5969
5970 tmp &= bits->mask;
5971
5972 return (tmp == bits->val) ? 1 : 0;
5973}
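
/*
 * Illustrative use of pci_test_config_bits(): check a hypothetical
 * enable bit in PCI config space. The offset, mask, and value below
 * are invented for the example; real drivers use controller-specific
 * definitions.
 */
static int example_port_enabled(struct pci_dev *pdev)
{
	static const struct pci_bits enable_bit = {
		.reg	= 0x41,	/* config space offset (hypothetical) */
		.width	= 1,	/* read one byte */
		.mask	= 0x08,
		.val	= 0x08,	/* "enabled" when bit 3 is set */
	};

	/* 1 if the masked value matches, 0 if not, -EINVAL on bad width */
	return pci_test_config_bits(pdev, &enable_bit);
}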
9b847548 5974
3c5100c1 5975void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
5976{
5977 pci_save_state(pdev);
500530f6 5978
3c5100c1 5979 if (mesg.event == PM_EVENT_SUSPEND) {
500530f6
TH
5980 pci_disable_device(pdev);
5981 pci_set_power_state(pdev, PCI_D3hot);
5982 }
9b847548
JA
5983}
5984
553c4aa6 5985int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 5986{
553c4aa6
TH
5987 int rc;
5988
9b847548
JA
5989 pci_set_power_state(pdev, PCI_D0);
5990 pci_restore_state(pdev);
553c4aa6 5991
b878ca5d 5992 rc = pcim_enable_device(pdev);
553c4aa6
TH
5993 if (rc) {
5994 dev_printk(KERN_ERR, &pdev->dev,
5995 "failed to enable device after resume (%d)\n", rc);
5996 return rc;
5997 }
5998
9b847548 5999 pci_set_master(pdev);
553c4aa6 6000 return 0;
500530f6
TH
6001}
6002
3c5100c1 6003int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6004{
cca3974e 6005 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6006 int rc = 0;
6007
cca3974e 6008 rc = ata_host_suspend(host, mesg);
500530f6
TH
6009 if (rc)
6010 return rc;
6011
3c5100c1 6012 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6013
6014 return 0;
6015}
6016
6017int ata_pci_device_resume(struct pci_dev *pdev)
6018{
cca3974e 6019 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6020 int rc;
500530f6 6021
553c4aa6
TH
6022 rc = ata_pci_device_do_resume(pdev);
6023 if (rc == 0)
6024 ata_host_resume(host);
6025 return rc;
9b847548 6026}
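
/*
 * Sketch of how a PCI LLD typically wires the helpers above into its
 * pci_driver; the "example" identifiers are placeholders. A driver
 * with no controller-specific suspend/resume work can point the PM
 * hooks straight at ata_pci_device_suspend()/ata_pci_device_resume().
 */
static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_pci_tbl,	/* hypothetical ID table */
	.probe		= example_init_one,	/* hypothetical probe */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};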
1da177e4
LT
6027#endif /* CONFIG_PCI */
6028
6029
1da177e4
LT
6030static int __init ata_init(void)
6031{
a8601e5f 6032 ata_probe_timeout *= HZ;
1da177e4
LT
6033 ata_wq = create_workqueue("ata");
6034 if (!ata_wq)
6035 return -ENOMEM;
6036
453b07ac
TH
6037 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6038 if (!ata_aux_wq) {
6039 destroy_workqueue(ata_wq);
6040 return -ENOMEM;
6041 }
6042
1da177e4
LT
6043 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6044 return 0;
6045}
6046
6047static void __exit ata_exit(void)
6048{
6049 destroy_workqueue(ata_wq);
453b07ac 6050 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6051}
6052
a4625085 6053subsys_initcall(ata_init);
1da177e4
LT
6054module_exit(ata_exit);
6055
67846b30 6056static unsigned long ratelimit_time;
34af946a 6057static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6058
6059int ata_ratelimit(void)
6060{
6061 int rc;
6062 unsigned long flags;
6063
6064 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6065
6066 if (time_after(jiffies, ratelimit_time)) {
6067 rc = 1;
6068 ratelimit_time = jiffies + (HZ/5);
6069 } else
6070 rc = 0;
6071
6072 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6073
6074 return rc;
6075}
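
/*
 * Typical ata_ratelimit() use, e.g. from interrupt context: with the
 * HZ/5 window above, at most five messages per second get through, so
 * a wedged device cannot flood the log. Sketch only.
 */
static void example_complain(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
}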
6076
c22daff4
TH
6077/**
6078 * ata_wait_register - wait until register value changes
6079 * @reg: IO-mapped register
6080 * @mask: Mask to apply to read register value
6081 * @val: Wait condition
6082 * @interval_msec: polling interval in milliseconds
6083 * @timeout_msec: timeout in milliseconds
6084 *
6085 * Waiting for some bits of register to change is a common
 6086 * operation for ATA controllers. This function reads 32-bit LE
6087 * IO-mapped register @reg and tests for the following condition.
6088 *
6089 * (*@reg & mask) != val
6090 *
6091 * If the condition is met, it returns; otherwise, the process is
6092 * repeated after @interval_msec until timeout.
6093 *
6094 * LOCKING:
6095 * Kernel thread context (may sleep)
6096 *
6097 * RETURNS:
6098 * The final register value.
6099 */
6100u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6101 unsigned long interval_msec,
6102 unsigned long timeout_msec)
6103{
6104 unsigned long timeout;
6105 u32 tmp;
6106
6107 tmp = ioread32(reg);
6108
6109 /* Calculate timeout _after_ the first read to make sure
6110 * preceding writes reach the controller before starting to
6111 * eat away the timeout.
6112 */
6113 timeout = jiffies + (timeout_msec * HZ) / 1000;
6114
6115 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6116 msleep(interval_msec);
6117 tmp = ioread32(reg);
6118 }
6119
6120 return tmp;
6121}
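
/*
 * Sketch: poll a 32-bit IO-mapped status register every 10ms until
 * BSY drops, for at most one second. Passing mask == val == ATA_BUSY
 * makes the wait condition "(reg & ATA_BUSY) != ATA_BUSY", i.e. the
 * call returns as soon as the busy bit clears. The register pointer
 * is a placeholder.
 */
static int example_wait_not_busy(void __iomem *status_reg)
{
	u32 status = ata_wait_register(status_reg, ATA_BUSY, ATA_BUSY,
				       10, 1000);

	return (status & ATA_BUSY) ? -EBUSY : 0;
}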
6122
dd5b06c4
TH
6123/*
6124 * Dummy port_ops
6125 */
6126static void ata_dummy_noret(struct ata_port *ap) { }
6127static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6128static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6129
6130static u8 ata_dummy_check_status(struct ata_port *ap)
6131{
6132 return ATA_DRDY;
6133}
6134
6135static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6136{
6137 return AC_ERR_SYSTEM;
6138}
6139
6140const struct ata_port_operations ata_dummy_port_ops = {
6141 .port_disable = ata_port_disable,
6142 .check_status = ata_dummy_check_status,
6143 .check_altstatus = ata_dummy_check_status,
6144 .dev_select = ata_noop_dev_select,
6145 .qc_prep = ata_noop_qc_prep,
6146 .qc_issue = ata_dummy_qc_issue,
6147 .freeze = ata_dummy_noret,
6148 .thaw = ata_dummy_noret,
6149 .error_handler = ata_dummy_noret,
6150 .post_internal_cmd = ata_dummy_qc_noret,
6151 .irq_clear = ata_dummy_noret,
6152 .port_start = ata_dummy_ret0,
6153 .port_stop = ata_dummy_noret,
6154};
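
/*
 * LLDs rarely reference ata_dummy_port_ops directly: setting the
 * matching bit in ata_probe_ent.dummy_port_mask makes ata_device_add()
 * install it (see the "DUMMY" branch there), which is how drivers skip
 * a nonexistent channel on a half-populated controller.
 */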
6155
1da177e4
LT
6156/*
6157 * libata is essentially a library of internal helper functions for
6158 * low-level ATA host controller drivers. As such, the API/ABI is
6159 * likely to change as new drivers are added and updated.
6160 * Do not depend on ABI/API stability.
6161 */
6162
e9c83914
TH
6163EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6164EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6165EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6166EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
1da177e4
LT
6167EXPORT_SYMBOL_GPL(ata_std_bios_param);
6168EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6169EXPORT_SYMBOL_GPL(ata_host_init);
1da177e4 6170EXPORT_SYMBOL_GPL(ata_device_add);
0529c159 6171EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6172EXPORT_SYMBOL_GPL(ata_sg_init);
6173EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6174EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6175EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6176EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6177EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6178EXPORT_SYMBOL_GPL(ata_tf_load);
6179EXPORT_SYMBOL_GPL(ata_tf_read);
6180EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6181EXPORT_SYMBOL_GPL(ata_std_dev_select);
6182EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6183EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6184EXPORT_SYMBOL_GPL(ata_check_status);
6185EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6186EXPORT_SYMBOL_GPL(ata_exec_command);
6187EXPORT_SYMBOL_GPL(ata_port_start);
1da177e4 6188EXPORT_SYMBOL_GPL(ata_interrupt);
0d5ff566
TH
6189EXPORT_SYMBOL_GPL(ata_data_xfer);
6190EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6191EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6192EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6193EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6194EXPORT_SYMBOL_GPL(ata_bmdma_start);
6195EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6196EXPORT_SYMBOL_GPL(ata_bmdma_status);
6197EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
6198EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6199EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6200EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6201EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6202EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6203EXPORT_SYMBOL_GPL(ata_port_probe);
3c567b7d 6204EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7
TH
6205EXPORT_SYMBOL_GPL(sata_phy_debounce);
6206EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4
LT
6207EXPORT_SYMBOL_GPL(sata_phy_reset);
6208EXPORT_SYMBOL_GPL(__sata_phy_reset);
6209EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6210EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6211EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6212EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804
TH
6213EXPORT_SYMBOL_GPL(sata_std_hardreset);
6214EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6215EXPORT_SYMBOL_GPL(ata_dev_classify);
6216EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6217EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6218EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6219EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6220EXPORT_SYMBOL_GPL(ata_busy_sleep);
86e45b6b 6221EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
6222EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6223EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6224EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6225EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6226EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6227EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
6228EXPORT_SYMBOL_GPL(sata_scr_valid);
6229EXPORT_SYMBOL_GPL(sata_scr_read);
6230EXPORT_SYMBOL_GPL(sata_scr_write);
6231EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6232EXPORT_SYMBOL_GPL(ata_port_online);
6233EXPORT_SYMBOL_GPL(ata_port_offline);
cca3974e
JG
6234EXPORT_SYMBOL_GPL(ata_host_suspend);
6235EXPORT_SYMBOL_GPL(ata_host_resume);
6a62a04d
TH
6236EXPORT_SYMBOL_GPL(ata_id_string);
6237EXPORT_SYMBOL_GPL(ata_id_c_string);
6919a0a6 6238EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4
LT
6239EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6240
1bc4ccff 6241EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
6242EXPORT_SYMBOL_GPL(ata_timing_compute);
6243EXPORT_SYMBOL_GPL(ata_timing_merge);
6244
1da177e4
LT
6245#ifdef CONFIG_PCI
6246EXPORT_SYMBOL_GPL(pci_test_config_bits);
6247EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6248EXPORT_SYMBOL_GPL(ata_pci_init_one);
6249EXPORT_SYMBOL_GPL(ata_pci_remove_one);
500530f6
TH
6250EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6251EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6252EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6253EXPORT_SYMBOL_GPL(ata_pci_device_resume);
67951ade
AC
6254EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6255EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6256#endif /* CONFIG_PCI */
9b847548 6257
9b847548
JA
6258EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6259EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
ece1d636 6260
ece1d636 6261EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03
TH
6262EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6263EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
6264EXPORT_SYMBOL_GPL(ata_port_freeze);
6265EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6266EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6267EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6268EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6269EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
6270EXPORT_SYMBOL_GPL(ata_irq_on);
6271EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6272EXPORT_SYMBOL_GPL(ata_irq_ack);
6273EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);