1/*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
50#include <linux/jiffies.h>
51#include <linux/scatterlist.h>
52#include <scsi/scsi.h>
53#include <scsi/scsi_cmnd.h>
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
62#define DRV_VERSION "2.21" /* must be exactly four chars */
63
64
65/* debounce timing parameters in msecs { interval, duration, timeout } */
66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
69
70static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73static void ata_dev_xfermask(struct ata_device *dev);
74static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
75
76unsigned int ata_print_id = 1;
77static struct workqueue_struct *ata_wq;
78
79struct workqueue_struct *ata_aux_wq;
80
81int atapi_enabled = 1;
82module_param(atapi_enabled, int, 0444);
83MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84
85int atapi_dmadir = 0;
86module_param(atapi_dmadir, int, 0444);
87MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88
89int libata_fua = 0;
90module_param_named(fua, libata_fua, int, 0444);
91MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
92
93static int ata_ignore_hpa = 0;
94module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
95MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
96
97static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
98module_param(ata_probe_timeout, int, 0444);
99MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
100
101int libata_noacpi = 1;
102module_param_named(noacpi, libata_noacpi, int, 0444);
103MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
104
105MODULE_AUTHOR("Jeff Garzik");
106MODULE_DESCRIPTION("Library module for ATA devices");
107MODULE_LICENSE("GPL");
108MODULE_VERSION(DRV_VERSION);
109
110
111/**
112 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
113 * @tf: Taskfile to convert
114 * @pmp: Port multiplier port
115 * @is_cmd: This FIS is for command
 116 * @fis: Buffer into which data will be output
117 *
118 * Converts a standard ATA taskfile to a Serial ATA
119 * FIS structure (Register - Host to Device).
120 *
121 * LOCKING:
122 * Inherited from caller.
123 */
124void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
125{
126 fis[0] = 0x27; /* Register - Host to Device FIS */
 127 fis[1] = pmp & 0xf; /* Port multiplier number */
128 if (is_cmd)
129 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
130
131 fis[2] = tf->command;
132 fis[3] = tf->feature;
133
134 fis[4] = tf->lbal;
135 fis[5] = tf->lbam;
136 fis[6] = tf->lbah;
137 fis[7] = tf->device;
138
139 fis[8] = tf->hob_lbal;
140 fis[9] = tf->hob_lbam;
141 fis[10] = tf->hob_lbah;
142 fis[11] = tf->hob_feature;
143
144 fis[12] = tf->nsect;
145 fis[13] = tf->hob_nsect;
146 fis[14] = 0;
147 fis[15] = tf->ctl;
148
149 fis[16] = 0;
150 fis[17] = 0;
151 fis[18] = 0;
152 fis[19] = 0;
153}
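
/*
 * Illustrative sketch, not part of the original file: issuing a command
 * through this helper with pmp 0 and is_cmd set yields fis[0] == 0x27
 * (Register - Host to Device) and fis[1] == 0x80 (bit 7 set, PMP field
 * 0), followed by the taskfile registers in the byte order above.
 */
#if 0
static void example_tf_to_fis(const struct ata_taskfile *tf)
{
	u8 fis[20];	/* a Register H2D FIS is 5 dwords */

	ata_tf_to_fis(tf, 0, 1, fis);
	/* fis[2] now holds tf->command, fis[4..6] the low LBA bytes */
}
#endif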
154
155/**
156 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
157 * @fis: Buffer from which data will be input
158 * @tf: Taskfile to output
159 *
160 * Converts a serial ATA FIS structure to a standard ATA taskfile.
161 *
162 * LOCKING:
163 * Inherited from caller.
164 */
165
166void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
167{
168 tf->command = fis[2]; /* status */
169 tf->feature = fis[3]; /* error */
170
171 tf->lbal = fis[4];
172 tf->lbam = fis[5];
173 tf->lbah = fis[6];
174 tf->device = fis[7];
175
176 tf->hob_lbal = fis[8];
177 tf->hob_lbam = fis[9];
178 tf->hob_lbah = fis[10];
179
180 tf->nsect = fis[12];
181 tf->hob_nsect = fis[13];
182}
183
184static const u8 ata_rw_cmds[] = {
185 /* pio multi */
186 ATA_CMD_READ_MULTI,
187 ATA_CMD_WRITE_MULTI,
188 ATA_CMD_READ_MULTI_EXT,
189 ATA_CMD_WRITE_MULTI_EXT,
190 0,
191 0,
192 0,
193 ATA_CMD_WRITE_MULTI_FUA_EXT,
194 /* pio */
195 ATA_CMD_PIO_READ,
196 ATA_CMD_PIO_WRITE,
197 ATA_CMD_PIO_READ_EXT,
198 ATA_CMD_PIO_WRITE_EXT,
199 0,
200 0,
201 0,
202 0,
203 /* dma */
204 ATA_CMD_READ,
205 ATA_CMD_WRITE,
206 ATA_CMD_READ_EXT,
207 ATA_CMD_WRITE_EXT,
208 0,
209 0,
210 0,
211 ATA_CMD_WRITE_FUA_EXT
212};
213
214/**
215 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
216 * @tf: command to examine and configure
217 * @dev: device tf belongs to
218 *
219 * Examine the device configuration and tf->flags to calculate
220 * the proper read/write commands and protocol to use.
221 *
222 * LOCKING:
223 * caller.
224 */
225static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
226{
227 u8 cmd;
228
229 int index, fua, lba48, write;
230
231 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
232 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
233 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
234
235 if (dev->flags & ATA_DFLAG_PIO) {
236 tf->protocol = ATA_PROT_PIO;
237 index = dev->multi_count ? 0 : 8;
238 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
239 /* Unable to use DMA due to host limitation */
240 tf->protocol = ATA_PROT_PIO;
241 index = dev->multi_count ? 0 : 8;
242 } else {
243 tf->protocol = ATA_PROT_DMA;
244 index = 16;
245 }
246
247 cmd = ata_rw_cmds[index + fua + lba48 + write];
248 if (cmd) {
249 tf->command = cmd;
250 return 0;
251 }
252 return -1;
253}
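
/*
 * Worked example, illustrative only: the table above is indexed as
 * base (0 = PIO multi, 8 = PIO, 16 = DMA) + fua*4 + lba48*2 + write.
 * An LBA48 FUA write on a DMA-capable device selects entry
 * 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT; a plain PIO read
 * with multi_count set selects entry 0, i.e. ATA_CMD_READ_MULTI.
 */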
254
255/**
256 * ata_tf_read_block - Read block address from ATA taskfile
257 * @tf: ATA taskfile of interest
258 * @dev: ATA device @tf belongs to
259 *
260 * LOCKING:
261 * None.
262 *
263 * Read block address from @tf. This function can handle all
264 * three address formats - LBA, LBA48 and CHS. tf->protocol and
265 * flags select the address format to use.
266 *
267 * RETURNS:
268 * Block address read from @tf.
269 */
270u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
271{
272 u64 block = 0;
273
274 if (tf->flags & ATA_TFLAG_LBA) {
275 if (tf->flags & ATA_TFLAG_LBA48) {
276 block |= (u64)tf->hob_lbah << 40;
277 block |= (u64)tf->hob_lbam << 32;
 278 block |= (u64)tf->hob_lbal << 24;
279 } else
280 block |= (tf->device & 0xf) << 24;
281
282 block |= tf->lbah << 16;
283 block |= tf->lbam << 8;
284 block |= tf->lbal;
285 } else {
286 u32 cyl, head, sect;
287
288 cyl = tf->lbam | (tf->lbah << 8);
289 head = tf->device & 0xf;
290 sect = tf->lbal;
291
292 block = (cyl * dev->heads + head) * dev->sectors + sect;
293 }
294
295 return block;
296}
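
/*
 * Worked example, illustrative only: on a drive geometry of 16 heads
 * and 63 sectors per track, a CHS taskfile of cyl 2, head 3, sect 4
 * decodes to (2 * 16 + 3) * 63 + 4 = 2209.
 */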
297
298/**
299 * ata_build_rw_tf - Build ATA taskfile for given read/write request
300 * @tf: Target ATA taskfile
301 * @dev: ATA device @tf belongs to
302 * @block: Block address
303 * @n_block: Number of blocks
304 * @tf_flags: RW/FUA etc...
305 * @tag: tag
306 *
307 * LOCKING:
308 * None.
309 *
310 * Build ATA taskfile @tf for read/write request described by
311 * @block, @n_block, @tf_flags and @tag on @dev.
312 *
313 * RETURNS:
314 *
315 * 0 on success, -ERANGE if the request is too large for @dev,
316 * -EINVAL if the request is invalid.
317 */
318int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
319 u64 block, u32 n_block, unsigned int tf_flags,
320 unsigned int tag)
321{
322 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
323 tf->flags |= tf_flags;
324
325 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
326 /* yay, NCQ */
327 if (!lba_48_ok(block, n_block))
328 return -ERANGE;
329
330 tf->protocol = ATA_PROT_NCQ;
331 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
332
333 if (tf->flags & ATA_TFLAG_WRITE)
334 tf->command = ATA_CMD_FPDMA_WRITE;
335 else
336 tf->command = ATA_CMD_FPDMA_READ;
337
338 tf->nsect = tag << 3;
339 tf->hob_feature = (n_block >> 8) & 0xff;
340 tf->feature = n_block & 0xff;
341
342 tf->hob_lbah = (block >> 40) & 0xff;
343 tf->hob_lbam = (block >> 32) & 0xff;
344 tf->hob_lbal = (block >> 24) & 0xff;
345 tf->lbah = (block >> 16) & 0xff;
346 tf->lbam = (block >> 8) & 0xff;
347 tf->lbal = block & 0xff;
348
349 tf->device = 1 << 6;
350 if (tf->flags & ATA_TFLAG_FUA)
351 tf->device |= 1 << 7;
352 } else if (dev->flags & ATA_DFLAG_LBA) {
353 tf->flags |= ATA_TFLAG_LBA;
354
355 if (lba_28_ok(block, n_block)) {
356 /* use LBA28 */
357 tf->device |= (block >> 24) & 0xf;
358 } else if (lba_48_ok(block, n_block)) {
359 if (!(dev->flags & ATA_DFLAG_LBA48))
360 return -ERANGE;
361
362 /* use LBA48 */
363 tf->flags |= ATA_TFLAG_LBA48;
364
365 tf->hob_nsect = (n_block >> 8) & 0xff;
366
367 tf->hob_lbah = (block >> 40) & 0xff;
368 tf->hob_lbam = (block >> 32) & 0xff;
369 tf->hob_lbal = (block >> 24) & 0xff;
370 } else
371 /* request too large even for LBA48 */
372 return -ERANGE;
373
374 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
375 return -EINVAL;
376
377 tf->nsect = n_block & 0xff;
378
379 tf->lbah = (block >> 16) & 0xff;
380 tf->lbam = (block >> 8) & 0xff;
381 tf->lbal = block & 0xff;
382
383 tf->device |= ATA_LBA;
384 } else {
385 /* CHS */
386 u32 sect, head, cyl, track;
387
388 /* The request -may- be too large for CHS addressing. */
389 if (!lba_28_ok(block, n_block))
390 return -ERANGE;
391
392 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
393 return -EINVAL;
394
395 /* Convert LBA to CHS */
396 track = (u32)block / dev->sectors;
397 cyl = track / dev->heads;
398 head = track % dev->heads;
399 sect = (u32)block % dev->sectors + 1;
400
401 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
402 (u32)block, track, cyl, head, sect);
403
404 /* Check whether the converted CHS can fit.
405 Cylinder: 0-65535
406 Head: 0-15
 407 Sector: 1-255 */
408 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
409 return -ERANGE;
410
411 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
412 tf->lbal = sect;
413 tf->lbam = cyl;
414 tf->lbah = cyl >> 8;
415 tf->device |= head;
416 }
417
418 return 0;
419}
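
/*
 * Illustrative sketch, not part of the original file: building a
 * non-NCQ 16-sector read at block 1024.  ATA_TAG_INTERNAL skips the
 * NCQ branch, so the LBA28/LBA48/CHS paths above apply.
 */
#if 0
static int example_build_read(struct ata_device *dev,
			      struct ata_taskfile *tf)
{
	ata_tf_init(dev, tf);
	return ata_build_rw_tf(tf, dev, 1024, 16, 0, ATA_TAG_INTERNAL);
}
#endif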
420
421/**
422 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
423 * @pio_mask: pio_mask
424 * @mwdma_mask: mwdma_mask
425 * @udma_mask: udma_mask
426 *
427 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
428 * unsigned int xfer_mask.
429 *
430 * LOCKING:
431 * None.
432 *
433 * RETURNS:
434 * Packed xfer_mask.
435 */
436static unsigned int ata_pack_xfermask(unsigned int pio_mask,
437 unsigned int mwdma_mask,
438 unsigned int udma_mask)
439{
440 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
441 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
442 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
443}
444
445/**
446 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
447 * @xfer_mask: xfer_mask to unpack
448 * @pio_mask: resulting pio_mask
449 * @mwdma_mask: resulting mwdma_mask
450 * @udma_mask: resulting udma_mask
451 *
452 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 453 * Any NULL destination masks will be ignored.
454 */
455static void ata_unpack_xfermask(unsigned int xfer_mask,
456 unsigned int *pio_mask,
457 unsigned int *mwdma_mask,
458 unsigned int *udma_mask)
459{
460 if (pio_mask)
461 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
462 if (mwdma_mask)
463 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
464 if (udma_mask)
465 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
466}
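
/*
 * Worked example, illustrative only: PIO0-4 (0x1f), MWDMA0-2 (0x07)
 * and UDMA0-5 (0x3f) pack into
 * (0x1f << ATA_SHIFT_PIO) | (0x07 << ATA_SHIFT_MWDMA) |
 * (0x3f << ATA_SHIFT_UDMA),
 * and ata_unpack_xfermask() recovers the three fields from it.
 */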
467
468static const struct ata_xfer_ent {
469 int shift, bits;
470 u8 base;
471} ata_xfer_tbl[] = {
472 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
473 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
474 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
475 { -1, },
476};
477
478/**
479 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
480 * @xfer_mask: xfer_mask of interest
481 *
482 * Return matching XFER_* value for @xfer_mask. Only the highest
483 * bit of @xfer_mask is considered.
484 *
485 * LOCKING:
486 * None.
487 *
488 * RETURNS:
489 * Matching XFER_* value, 0 if no match found.
490 */
491static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
492{
493 int highbit = fls(xfer_mask) - 1;
494 const struct ata_xfer_ent *ent;
495
496 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
497 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
498 return ent->base + highbit - ent->shift;
499 return 0;
500}
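
/*
 * Worked example, illustrative only: if the highest set bit of
 * @xfer_mask is UDMA5 (highbit == ATA_SHIFT_UDMA + 5), the UDMA table
 * entry matches and the result is XFER_UDMA_0 + 5, i.e. XFER_UDMA_5.
 */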
501
502/**
503 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
504 * @xfer_mode: XFER_* of interest
505 *
506 * Return matching xfer_mask for @xfer_mode.
507 *
508 * LOCKING:
509 * None.
510 *
511 * RETURNS:
512 * Matching xfer_mask, 0 if no match found.
513 */
514static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
515{
516 const struct ata_xfer_ent *ent;
517
518 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
519 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
520 return 1 << (ent->shift + xfer_mode - ent->base);
521 return 0;
522}
523
524/**
525 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
526 * @xfer_mode: XFER_* of interest
527 *
528 * Return matching xfer_shift for @xfer_mode.
529 *
530 * LOCKING:
531 * None.
532 *
533 * RETURNS:
534 * Matching xfer_shift, -1 if no match found.
535 */
536static int ata_xfer_mode2shift(unsigned int xfer_mode)
537{
538 const struct ata_xfer_ent *ent;
539
540 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
541 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
542 return ent->shift;
543 return -1;
544}
545
546/**
547 * ata_mode_string - convert xfer_mask to string
548 * @xfer_mask: mask of bits supported; only highest bit counts.
549 *
550 * Determine string which represents the highest speed
 551 * (highest bit in @xfer_mask).
552 *
553 * LOCKING:
554 * None.
555 *
556 * RETURNS:
557 * Constant C string representing highest speed listed in
 558 * @xfer_mask, or the constant C string "<n/a>".
559 */
560static const char *ata_mode_string(unsigned int xfer_mask)
561{
562 static const char * const xfer_mode_str[] = {
563 "PIO0",
564 "PIO1",
565 "PIO2",
566 "PIO3",
567 "PIO4",
568 "PIO5",
569 "PIO6",
570 "MWDMA0",
571 "MWDMA1",
572 "MWDMA2",
573 "MWDMA3",
574 "MWDMA4",
575 "UDMA/16",
576 "UDMA/25",
577 "UDMA/33",
578 "UDMA/44",
579 "UDMA/66",
580 "UDMA/100",
581 "UDMA/133",
582 "UDMA7",
583 };
584 int highbit;
585
586 highbit = fls(xfer_mask) - 1;
587 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
588 return xfer_mode_str[highbit];
589 return "<n/a>";
590}
591
592static const char *sata_spd_string(unsigned int spd)
593{
594 static const char * const spd_str[] = {
595 "1.5 Gbps",
596 "3.0 Gbps",
597 };
598
599 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
600 return "<unknown>";
601 return spd_str[spd - 1];
602}
603
604void ata_dev_disable(struct ata_device *dev)
605{
606 if (ata_dev_enabled(dev)) {
607 if (ata_msg_drv(dev->link->ap))
608 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
609 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
610 ATA_DNXFER_QUIET);
611 dev->class++;
612 }
613}
614
615/**
616 * ata_devchk - PATA device presence detection
617 * @ap: ATA channel to examine
618 * @device: Device to examine (starting at zero)
619 *
620 * This technique was originally described in
621 * Hale Landis's ATADRVR (www.ata-atapi.com), and
622 * later found its way into the ATA/ATAPI spec.
623 *
624 * Write a pattern to the ATA shadow registers,
625 * and if a device is present, it will respond by
626 * correctly storing and echoing back the
627 * ATA shadow register contents.
628 *
629 * LOCKING:
630 * caller.
631 */
632
633static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
634{
635 struct ata_ioports *ioaddr = &ap->ioaddr;
636 u8 nsect, lbal;
637
638 ap->ops->dev_select(ap, device);
639
640 iowrite8(0x55, ioaddr->nsect_addr);
641 iowrite8(0xaa, ioaddr->lbal_addr);
642
643 iowrite8(0xaa, ioaddr->nsect_addr);
644 iowrite8(0x55, ioaddr->lbal_addr);
645
646 iowrite8(0x55, ioaddr->nsect_addr);
647 iowrite8(0xaa, ioaddr->lbal_addr);
648
649 nsect = ioread8(ioaddr->nsect_addr);
650 lbal = ioread8(ioaddr->lbal_addr);
651
652 if ((nsect == 0x55) && (lbal == 0xaa))
653 return 1; /* we found a device */
654
655 return 0; /* nothing found */
656}
657
658/**
659 * ata_dev_classify - determine device type based on ATA-spec signature
660 * @tf: ATA taskfile register set for device to be identified
661 *
662 * Determine from taskfile register contents whether a device is
663 * ATA or ATAPI, as per "Signature and persistence" section
664 * of ATA/PI spec (volume 1, sect 5.14).
665 *
666 * LOCKING:
667 * None.
668 *
669 * RETURNS:
670 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 671 * in the event of failure.
672 */
673
674unsigned int ata_dev_classify(const struct ata_taskfile *tf)
675{
676 /* Apple's open source Darwin code hints that some devices only
677 * put a proper signature into the LBA mid/high registers,
 678 * so we check only those; it's sufficient for uniqueness.
679 */
680
681 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
682 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
683 DPRINTK("found ATA device by sig\n");
684 return ATA_DEV_ATA;
685 }
686
687 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
688 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
689 DPRINTK("found ATAPI device by sig\n");
690 return ATA_DEV_ATAPI;
691 }
692
693 DPRINTK("unknown device\n");
694 return ATA_DEV_UNKNOWN;
695}
696
697/**
698 * ata_dev_try_classify - Parse returned ATA device signature
699 * @ap: ATA channel to examine
700 * @device: Device to examine (starting at zero)
701 * @r_err: Value of error register on completion
702 *
703 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
704 * an ATA/ATAPI-defined set of values is placed in the ATA
705 * shadow registers, indicating the results of device detection
706 * and diagnostics.
707 *
708 * Select the ATA device, and read the values from the ATA shadow
709 * registers. Then parse according to the Error register value,
710 * and the spec-defined values examined by ata_dev_classify().
711 *
712 * LOCKING:
713 * caller.
714 *
715 * RETURNS:
716 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
717 */
718
719unsigned int
720ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
721{
722 struct ata_taskfile tf;
723 unsigned int class;
724 u8 err;
725
726 ap->ops->dev_select(ap, device);
727
728 memset(&tf, 0, sizeof(tf));
729
730 ap->ops->tf_read(ap, &tf);
731 err = tf.feature;
732 if (r_err)
733 *r_err = err;
734
735 /* see if device passed diags: if master then continue and warn later */
736 if (err == 0 && device == 0)
737 /* diagnostic fail : do nothing _YET_ */
738 ap->link.device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
739 else if (err == 1)
740 /* do nothing */ ;
741 else if ((device == 0) && (err == 0x81))
742 /* do nothing */ ;
743 else
744 return ATA_DEV_NONE;
745
746 /* determine if device is ATA or ATAPI */
747 class = ata_dev_classify(&tf);
748
749 if (class == ATA_DEV_UNKNOWN)
750 return ATA_DEV_NONE;
751 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
752 return ATA_DEV_NONE;
753 return class;
754}
755
756/**
757 * ata_id_string - Convert IDENTIFY DEVICE page into string
758 * @id: IDENTIFY DEVICE results we will examine
759 * @s: string into which data is output
760 * @ofs: offset into identify device page
761 * @len: length of string to return. must be an even number.
762 *
763 * The strings in the IDENTIFY DEVICE page are broken up into
764 * 16-bit chunks. Run through the string, and output each
765 * 8-bit chunk linearly, regardless of platform.
766 *
767 * LOCKING:
768 * caller.
769 */
770
771void ata_id_string(const u16 *id, unsigned char *s,
772 unsigned int ofs, unsigned int len)
773{
774 unsigned int c;
775
776 while (len > 0) {
777 c = id[ofs] >> 8;
778 *s = c;
779 s++;
780
781 c = id[ofs] & 0xff;
782 *s = c;
783 s++;
784
785 ofs++;
786 len -= 2;
787 }
788}
789
790/**
791 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
792 * @id: IDENTIFY DEVICE results we will examine
793 * @s: string into which data is output
794 * @ofs: offset into identify device page
795 * @len: length of string to return. must be an odd number.
796 *
797 * This function is identical to ata_id_string except that it
798 * trims trailing spaces and terminates the resulting string with
799 * null. @len must be actual maximum length (even number) + 1.
800 *
801 * LOCKING:
802 * caller.
803 */
804void ata_id_c_string(const u16 *id, unsigned char *s,
805 unsigned int ofs, unsigned int len)
806{
807 unsigned char *p;
808
809 WARN_ON(!(len & 1));
810
811 ata_id_string(id, s, ofs, len - 1);
812
813 p = s + strnlen(s, len - 1);
814 while (p > s && p[-1] == ' ')
815 p--;
816 *p = '\0';
817}
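
/*
 * Illustrative sketch, not part of the original file: extracting the
 * model string (IDENTIFY words 27-46) into a trimmed, NUL-terminated
 * buffer, as done for modelbuf in ata_dev_configure() below.
 */
#if 0
static void example_read_model(const u16 *id, unsigned char *buf)
{
	/* buf must hold ATA_ID_PROD_LEN + 1 bytes */
	ata_id_c_string(id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN + 1);
}
#endif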
818
819static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
820{
821 u64 sectors = 0;
822
823 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
824 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
 825 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
826 sectors |= (tf->lbah & 0xff) << 16;
827 sectors |= (tf->lbam & 0xff) << 8;
828 sectors |= (tf->lbal & 0xff);
829
830 return ++sectors;
831}
832
833static u64 ata_tf_to_lba(struct ata_taskfile *tf)
834{
835 u64 sectors = 0;
836
837 sectors |= (tf->device & 0x0f) << 24;
838 sectors |= (tf->lbah & 0xff) << 16;
839 sectors |= (tf->lbam & 0xff) << 8;
840 sectors |= (tf->lbal & 0xff);
841
842 return ++sectors;
843}
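
/*
 * Worked example, illustrative only: a result taskfile of device 0x4a
 * with lbah/lbam/lbal all 0xff decodes to (0xa << 24) | 0xffffff ==
 * 0x0affffff; the increment turns the highest addressable LBA into a
 * sector count.
 */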
844
845/**
846 * ata_read_native_max_address_ext - LBA48 native max query
847 * @dev: Device to query
848 *
849 * Perform an LBA48 size query upon the device in question. Return the
850 * actual LBA48 size or zero if the command fails.
851 */
852
853static u64 ata_read_native_max_address_ext(struct ata_device *dev)
854{
855 unsigned int err;
856 struct ata_taskfile tf;
857
858 ata_tf_init(dev, &tf);
859
860 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
861 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
862 tf.protocol |= ATA_PROT_NODATA;
863 tf.device |= 0x40;
864
865 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
866 if (err)
867 return 0;
868
869 return ata_tf_to_lba48(&tf);
870}
871
872/**
873 * ata_read_native_max_address - LBA28 native max query
874 * @dev: Device to query
875 *
 876 * Perform an LBA28 size query upon the device in question. Return the
877 * actual LBA28 size or zero if the command fails.
878 */
879
880static u64 ata_read_native_max_address(struct ata_device *dev)
881{
882 unsigned int err;
883 struct ata_taskfile tf;
884
885 ata_tf_init(dev, &tf);
886
887 tf.command = ATA_CMD_READ_NATIVE_MAX;
888 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
889 tf.protocol |= ATA_PROT_NODATA;
890 tf.device |= 0x40;
891
892 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
893 if (err)
894 return 0;
895
896 return ata_tf_to_lba(&tf);
897}
898
899/**
900 * ata_set_native_max_address_ext - LBA48 native max set
901 * @dev: Device to query
902 * @new_sectors: new max sectors value to set for the device
903 *
904 * Perform an LBA48 size set max upon the device in question. Return the
905 * actual LBA48 size or zero if the command fails.
906 */
907
908static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
909{
910 unsigned int err;
911 struct ata_taskfile tf;
912
913 new_sectors--;
914
915 ata_tf_init(dev, &tf);
916
917 tf.command = ATA_CMD_SET_MAX_EXT;
918 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
919 tf.protocol |= ATA_PROT_NODATA;
920 tf.device |= 0x40;
921
922 tf.lbal = (new_sectors >> 0) & 0xff;
923 tf.lbam = (new_sectors >> 8) & 0xff;
924 tf.lbah = (new_sectors >> 16) & 0xff;
925
926 tf.hob_lbal = (new_sectors >> 24) & 0xff;
927 tf.hob_lbam = (new_sectors >> 32) & 0xff;
928 tf.hob_lbah = (new_sectors >> 40) & 0xff;
929
930 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
931 if (err)
932 return 0;
933
934 return ata_tf_to_lba48(&tf);
935}
936
937/**
938 * ata_set_native_max_address - LBA28 native max set
939 * @dev: Device to query
940 * @new_sectors: new max sectors value to set for the device
941 *
942 * Perform an LBA28 size set max upon the device in question. Return the
943 * actual LBA28 size or zero if the command fails.
944 */
945
946static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
947{
948 unsigned int err;
949 struct ata_taskfile tf;
950
951 new_sectors--;
952
953 ata_tf_init(dev, &tf);
954
955 tf.command = ATA_CMD_SET_MAX;
956 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
957 tf.protocol |= ATA_PROT_NODATA;
958
959 tf.lbal = (new_sectors >> 0) & 0xff;
960 tf.lbam = (new_sectors >> 8) & 0xff;
961 tf.lbah = (new_sectors >> 16) & 0xff;
962 tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
963
964 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
965 if (err)
966 return 0;
967
968 return ata_tf_to_lba(&tf);
969}
970
971/**
972 * ata_hpa_resize - Resize a device with an HPA set
973 * @dev: Device to resize
974 *
975 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
976 * it if required to the full size of the media. The caller must check
977 * the drive has the HPA feature set enabled.
978 */
979
980static u64 ata_hpa_resize(struct ata_device *dev)
981{
982 u64 sectors = dev->n_sectors;
983 u64 hpa_sectors;
984
985 if (ata_id_has_lba48(dev->id))
986 hpa_sectors = ata_read_native_max_address_ext(dev);
987 else
988 hpa_sectors = ata_read_native_max_address(dev);
989
990 if (hpa_sectors > sectors) {
991 ata_dev_printk(dev, KERN_INFO,
992 "Host Protected Area detected:\n"
993 "\tcurrent size: %lld sectors\n"
994 "\tnative size: %lld sectors\n",
995 (long long)sectors, (long long)hpa_sectors);
996
997 if (ata_ignore_hpa) {
998 if (ata_id_has_lba48(dev->id))
999 hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
1000 else
1001 hpa_sectors = ata_set_native_max_address(dev,
1002 hpa_sectors);
1003
1004 if (hpa_sectors) {
1005 ata_dev_printk(dev, KERN_INFO, "native size "
1006 "increased to %lld sectors\n",
1007 (long long)hpa_sectors);
1008 return hpa_sectors;
1009 }
1010 }
1011 } else if (hpa_sectors < sectors)
1012 ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
1013 "is smaller than sectors (%lld)\n", __FUNCTION__,
1014 (long long)hpa_sectors, (long long)sectors);
1015
1016 return sectors;
1017}
1018
1019static u64 ata_id_n_sectors(const u16 *id)
1020{
1021 if (ata_id_has_lba(id)) {
1022 if (ata_id_has_lba48(id))
1023 return ata_id_u64(id, 100);
1024 else
1025 return ata_id_u32(id, 60);
1026 } else {
1027 if (ata_id_current_chs_valid(id))
1028 return ata_id_u32(id, 57);
1029 else
1030 return id[1] * id[3] * id[6];
1031 }
1032}
1033
1034/**
1035 * ata_id_to_dma_mode - Identify DMA mode from id block
1036 * @dev: device to identify
1037 * @unknown: mode to assume if we cannot tell
1038 *
1039 * Set up the timing values for the device based upon the identify
1040 * reported values for the DMA mode. This function is used by drivers
1041 * which rely upon firmware configured modes, but wish to report the
1042 * mode correctly when possible.
1043 *
1044 * In addition we emit similarly formatted messages to the default
1045 * ata_dev_set_mode handler, in order to provide consistency of
1046 * presentation.
1047 */
1048
1049void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1050{
1051 unsigned int mask;
1052 u8 mode;
1053
1054 /* Pack the DMA modes */
1055 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1056 if (dev->id[53] & 0x04)
1057 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1058
1059 /* Select the mode in use */
1060 mode = ata_xfer_mask2mode(mask);
1061
1062 if (mode != 0) {
1063 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1064 ata_mode_string(mask));
1065 } else {
1066 /* SWDMA perhaps ? */
1067 mode = unknown;
1068 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1069 }
1070
1071 /* Configure the device reporting */
1072 dev->xfer_mode = mode;
1073 dev->xfer_shift = ata_xfer_mode2shift(mode);
1074}
1075
1076/**
1077 * ata_noop_dev_select - Select device 0/1 on ATA bus
1078 * @ap: ATA channel to manipulate
1079 * @device: ATA device (numbered from zero) to select
1080 *
 1081 * This function intentionally does nothing.
1082 *
1083 * May be used as the dev_select() entry in ata_port_operations.
1084 *
1085 * LOCKING:
1086 * caller.
1087 */
1088void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1089{
1090}
1091
1092
1093/**
1094 * ata_std_dev_select - Select device 0/1 on ATA bus
1095 * @ap: ATA channel to manipulate
1096 * @device: ATA device (numbered from zero) to select
1097 *
1098 * Use the method defined in the ATA specification to
1099 * make either device 0, or device 1, active on the
1100 * ATA channel. Works with both PIO and MMIO.
1101 *
1102 * May be used as the dev_select() entry in ata_port_operations.
1103 *
1104 * LOCKING:
1105 * caller.
1106 */
1107
1108void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1109{
1110 u8 tmp;
1111
1112 if (device == 0)
1113 tmp = ATA_DEVICE_OBS;
1114 else
1115 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1116
1117 iowrite8(tmp, ap->ioaddr.device_addr);
1118 ata_pause(ap); /* needed; also flushes, for mmio */
1119}
1120
1121/**
1122 * ata_dev_select - Select device 0/1 on ATA bus
1123 * @ap: ATA channel to manipulate
1124 * @device: ATA device (numbered from zero) to select
1125 * @wait: non-zero to wait for Status register BSY bit to clear
1126 * @can_sleep: non-zero if context allows sleeping
1127 *
1128 * Use the method defined in the ATA specification to
1129 * make either device 0, or device 1, active on the
1130 * ATA channel.
1131 *
1132 * This is a high-level version of ata_std_dev_select(),
1133 * which additionally provides the services of inserting
1134 * the proper pauses and status polling, where needed.
1135 *
1136 * LOCKING:
1137 * caller.
1138 */
1139
1140void ata_dev_select(struct ata_port *ap, unsigned int device,
1141 unsigned int wait, unsigned int can_sleep)
1142{
1143 if (ata_msg_probe(ap))
1144 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1145 "device %u, wait %u\n", device, wait);
1146
1147 if (wait)
1148 ata_wait_idle(ap);
1149
1150 ap->ops->dev_select(ap, device);
1151
1152 if (wait) {
1153 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1154 msleep(150);
1155 ata_wait_idle(ap);
1156 }
1157}
1158
1159/**
1160 * ata_dump_id - IDENTIFY DEVICE info debugging output
1161 * @id: IDENTIFY DEVICE page to dump
1162 *
1163 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1164 * page.
1165 *
1166 * LOCKING:
1167 * caller.
1168 */
1169
1170static inline void ata_dump_id(const u16 *id)
1171{
1172 DPRINTK("49==0x%04x "
1173 "53==0x%04x "
1174 "63==0x%04x "
1175 "64==0x%04x "
1176 "75==0x%04x \n",
1177 id[49],
1178 id[53],
1179 id[63],
1180 id[64],
1181 id[75]);
1182 DPRINTK("80==0x%04x "
1183 "81==0x%04x "
1184 "82==0x%04x "
1185 "83==0x%04x "
1186 "84==0x%04x \n",
1187 id[80],
1188 id[81],
1189 id[82],
1190 id[83],
1191 id[84]);
1192 DPRINTK("88==0x%04x "
1193 "93==0x%04x\n",
1194 id[88],
1195 id[93]);
1196}
1197
1198/**
1199 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1200 * @id: IDENTIFY data to compute xfer mask from
1201 *
1202 * Compute the xfermask for this device. This is not as trivial
1203 * as it seems if we must consider early devices correctly.
1204 *
1205 * FIXME: pre IDE drive timing (do we care ?).
1206 *
1207 * LOCKING:
1208 * None.
1209 *
1210 * RETURNS:
1211 * Computed xfermask
1212 */
1213static unsigned int ata_id_xfermask(const u16 *id)
1214{
1215 unsigned int pio_mask, mwdma_mask, udma_mask;
1216
1217 /* Usual case. Word 53 indicates word 64 is valid */
1218 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1219 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1220 pio_mask <<= 3;
1221 pio_mask |= 0x7;
1222 } else {
1223 /* If word 64 isn't valid then Word 51 high byte holds
1224 * the PIO timing number for the maximum. Turn it into
1225 * a mask.
1226 */
1227 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1228 if (mode < 5) /* Valid PIO range */
1229 pio_mask = (2 << mode) - 1;
1230 else
1231 pio_mask = 1;
1232
 1233 /* But wait... there's more. Design your standards by
 1234 * committee and you too can get a free iordy field to
 1235 * process. However, it's the speeds, not the modes, that are
1236 * are supported... Note drivers using the timing API
1237 * will get this right anyway
1238 */
1239 }
1240
1241 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1242
1243 if (ata_id_is_cfa(id)) {
1244 /*
1245 * Process compact flash extended modes
1246 */
1247 int pio = id[163] & 0x7;
1248 int dma = (id[163] >> 3) & 7;
1249
1250 if (pio)
1251 pio_mask |= (1 << 5);
1252 if (pio > 1)
1253 pio_mask |= (1 << 6);
1254 if (dma)
1255 mwdma_mask |= (1 << 3);
1256 if (dma > 1)
1257 mwdma_mask |= (1 << 4);
1258 }
1259
1260 udma_mask = 0;
1261 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1262 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1263
1264 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1265}
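
/*
 * Worked example, illustrative only: a drive reporting word 63 ==
 * 0x0007 (MWDMA0-2) and word 88 low byte == 0x3f (UDMA0-5, with word
 * 53 bit 2 set) yields mwdma_mask 0x07 and udma_mask 0x3f before
 * packing.
 */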
1266
1267/**
1268 * ata_port_queue_task - Queue port_task
1269 * @ap: The ata_port to queue port_task for
1270 * @fn: workqueue function to be scheduled
1271 * @data: data for @fn to use
1272 * @delay: delay time for workqueue function
1273 *
1274 * Schedule @fn(@data) for execution after @delay jiffies using
1275 * port_task. There is one port_task per port and it's the
 1276 * user's (the low level driver's) responsibility to make sure that only
1277 * one task is active at any given time.
1278 *
1279 * libata core layer takes care of synchronization between
1280 * port_task and EH. ata_port_queue_task() may be ignored for EH
1281 * synchronization.
1282 *
1283 * LOCKING:
1284 * Inherited from caller.
1285 */
1286void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1287 unsigned long delay)
1288{
1289 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1290 ap->port_task_data = data;
1291
 1292 /* may fail if ata_port_flush_task() is in progress */
1293 queue_delayed_work(ata_wq, &ap->port_task, delay);
1294}
1295
1296/**
1297 * ata_port_flush_task - Flush port_task
1298 * @ap: The ata_port to flush port_task for
1299 *
 1300 * After this function completes, port_task is guaranteed not to
1301 * be running or scheduled.
1302 *
1303 * LOCKING:
1304 * Kernel thread context (may sleep)
1305 */
1306void ata_port_flush_task(struct ata_port *ap)
1307{
1308 DPRINTK("ENTER\n");
1309
1310 cancel_rearming_delayed_work(&ap->port_task);
1311
1312 if (ata_msg_ctl(ap))
1313 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1314}
1315
1316static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1317{
1318 struct completion *waiting = qc->private_data;
1319
1320 complete(waiting);
1321}
1322
1323/**
1324 * ata_exec_internal_sg - execute libata internal command
1325 * @dev: Device to which the command is sent
1326 * @tf: Taskfile registers for the command and the result
1327 * @cdb: CDB for packet command
 1328 * @dma_dir: Data transfer direction of the command
1329 * @sg: sg list for the data buffer of the command
1330 * @n_elem: Number of sg entries
1331 *
1332 * Executes libata internal command with timeout. @tf contains
1333 * command on entry and result on return. Timeout and error
1334 * conditions are reported via return value. No recovery action
 1335 * is taken after a command times out. It's the caller's duty to
1336 * clean up after timeout.
1337 *
1338 * LOCKING:
1339 * None. Should be called with kernel context, might sleep.
1340 *
1341 * RETURNS:
1342 * Zero on success, AC_ERR_* mask on failure
1343 */
1344unsigned ata_exec_internal_sg(struct ata_device *dev,
1345 struct ata_taskfile *tf, const u8 *cdb,
1346 int dma_dir, struct scatterlist *sg,
1347 unsigned int n_elem)
1348{
1349 struct ata_link *link = dev->link;
1350 struct ata_port *ap = link->ap;
1351 u8 command = tf->command;
1352 struct ata_queued_cmd *qc;
1353 unsigned int tag, preempted_tag;
1354 u32 preempted_sactive, preempted_qc_active;
1355 DECLARE_COMPLETION_ONSTACK(wait);
1356 unsigned long flags;
1357 unsigned int err_mask;
1358 int rc;
1359
1360 spin_lock_irqsave(ap->lock, flags);
1361
1362 /* no internal command while frozen */
1363 if (ap->pflags & ATA_PFLAG_FROZEN) {
1364 spin_unlock_irqrestore(ap->lock, flags);
1365 return AC_ERR_SYSTEM;
1366 }
1367
1368 /* initialize internal qc */
1369
1370 /* XXX: Tag 0 is used for drivers with legacy EH as some
1371 * drivers choke if any other tag is given. This breaks
1372 * ata_tag_internal() test for those drivers. Don't use new
1373 * EH stuff without converting to it.
1374 */
1375 if (ap->ops->error_handler)
1376 tag = ATA_TAG_INTERNAL;
1377 else
1378 tag = 0;
1379
1380 if (test_and_set_bit(tag, &ap->qc_allocated))
1381 BUG();
1382 qc = __ata_qc_from_tag(ap, tag);
1383
1384 qc->tag = tag;
1385 qc->scsicmd = NULL;
1386 qc->ap = ap;
1387 qc->dev = dev;
1388 ata_qc_reinit(qc);
1389
1390 preempted_tag = link->active_tag;
1391 preempted_sactive = link->sactive;
1392 preempted_qc_active = ap->qc_active;
1393 link->active_tag = ATA_TAG_POISON;
1394 link->sactive = 0;
1395 ap->qc_active = 0;
1396
1397 /* prepare & issue qc */
1398 qc->tf = *tf;
1399 if (cdb)
1400 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1401 qc->flags |= ATA_QCFLAG_RESULT_TF;
1402 qc->dma_dir = dma_dir;
1403 if (dma_dir != DMA_NONE) {
1404 unsigned int i, buflen = 0;
1405
1406 for (i = 0; i < n_elem; i++)
1407 buflen += sg[i].length;
1408
1409 ata_sg_init(qc, sg, n_elem);
1410 qc->nbytes = buflen;
1411 }
1412
1413 qc->private_data = &wait;
1414 qc->complete_fn = ata_qc_complete_internal;
1415
1416 ata_qc_issue(qc);
1417
1418 spin_unlock_irqrestore(ap->lock, flags);
1419
1420 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1421
1422 ata_port_flush_task(ap);
1423
1424 if (!rc) {
1425 spin_lock_irqsave(ap->lock, flags);
1426
1427 /* We're racing with irq here. If we lose, the
1428 * following test prevents us from completing the qc
1429 * twice. If we win, the port is frozen and will be
1430 * cleaned up by ->post_internal_cmd().
1431 */
1432 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1433 qc->err_mask |= AC_ERR_TIMEOUT;
1434
1435 if (ap->ops->error_handler)
1436 ata_port_freeze(ap);
1437 else
1438 ata_qc_complete(qc);
1439
1440 if (ata_msg_warn(ap))
1441 ata_dev_printk(dev, KERN_WARNING,
1442 "qc timeout (cmd 0x%x)\n", command);
1443 }
1444
1445 spin_unlock_irqrestore(ap->lock, flags);
1446 }
1447
1448 /* do post_internal_cmd */
1449 if (ap->ops->post_internal_cmd)
1450 ap->ops->post_internal_cmd(qc);
1451
1452 /* perform minimal error analysis */
1453 if (qc->flags & ATA_QCFLAG_FAILED) {
1454 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1455 qc->err_mask |= AC_ERR_DEV;
1456
1457 if (!qc->err_mask)
1458 qc->err_mask |= AC_ERR_OTHER;
1459
1460 if (qc->err_mask & ~AC_ERR_OTHER)
1461 qc->err_mask &= ~AC_ERR_OTHER;
1462 }
1463
1464 /* finish up */
1465 spin_lock_irqsave(ap->lock, flags);
1466
1467 *tf = qc->result_tf;
1468 err_mask = qc->err_mask;
1469
1470 ata_qc_free(qc);
1471 link->active_tag = preempted_tag;
1472 link->sactive = preempted_sactive;
1473 ap->qc_active = preempted_qc_active;
1474
1475 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1476 * Until those drivers are fixed, we detect the condition
1477 * here, fail the command with AC_ERR_SYSTEM and reenable the
1478 * port.
1479 *
1480 * Note that this doesn't change any behavior as internal
1481 * command failure results in disabling the device in the
1482 * higher layer for LLDDs without new reset/EH callbacks.
1483 *
1484 * Kill the following code as soon as those drivers are fixed.
1485 */
1486 if (ap->flags & ATA_FLAG_DISABLED) {
1487 err_mask |= AC_ERR_SYSTEM;
1488 ata_port_probe(ap);
1489 }
1490
1491 spin_unlock_irqrestore(ap->lock, flags);
1492
1493 return err_mask;
1494}
1495
1496/**
1497 * ata_exec_internal - execute libata internal command
1498 * @dev: Device to which the command is sent
1499 * @tf: Taskfile registers for the command and the result
1500 * @cdb: CDB for packet command
 1501 * @dma_dir: Data transfer direction of the command
1502 * @buf: Data buffer of the command
1503 * @buflen: Length of data buffer
1504 *
1505 * Wrapper around ata_exec_internal_sg() which takes simple
1506 * buffer instead of sg list.
1507 *
1508 * LOCKING:
1509 * None. Should be called with kernel context, might sleep.
1510 *
1511 * RETURNS:
1512 * Zero on success, AC_ERR_* mask on failure
1513 */
1514unsigned ata_exec_internal(struct ata_device *dev,
1515 struct ata_taskfile *tf, const u8 *cdb,
1516 int dma_dir, void *buf, unsigned int buflen)
1517{
1518 struct scatterlist *psg = NULL, sg;
1519 unsigned int n_elem = 0;
1520
1521 if (dma_dir != DMA_NONE) {
1522 WARN_ON(!buf);
1523 sg_init_one(&sg, buf, buflen);
1524 psg = &sg;
1525 n_elem++;
1526 }
1527
1528 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
1529}
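
/*
 * Illustrative sketch, not part of the original file: reading one
 * 512-byte IDENTIFY page through the simple-buffer wrapper, much as
 * ata_dev_read_id() does below.
 */
#if 0
static unsigned int example_identify(struct ata_device *dev, u16 *id)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_ID_ATA;
	tf.protocol = ATA_PROT_PIO;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	return ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				 id, ATA_ID_WORDS * sizeof(id[0]));
}
#endif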
1530
1531/**
1532 * ata_do_simple_cmd - execute simple internal command
1533 * @dev: Device to which the command is sent
1534 * @cmd: Opcode to execute
1535 *
1536 * Execute a 'simple' command, that only consists of the opcode
1537 * 'cmd' itself, without filling any other registers
1538 *
1539 * LOCKING:
1540 * Kernel thread context (may sleep).
1541 *
1542 * RETURNS:
1543 * Zero on success, AC_ERR_* mask on failure
1544 */
1545unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1546{
1547 struct ata_taskfile tf;
1548
1549 ata_tf_init(dev, &tf);
1550
1551 tf.command = cmd;
1552 tf.flags |= ATA_TFLAG_DEVICE;
1553 tf.protocol = ATA_PROT_NODATA;
1554
1555 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1556}
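
/*
 * Illustrative usage, not from the original source: a cache flush is a
 * typical "simple" command - opcode only, no other registers - e.g.
 * err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 */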
1557
1558/**
1559 * ata_pio_need_iordy - check if iordy needed
1560 * @adev: ATA device
1561 *
1562 * Check if the current speed of the device requires IORDY. Used
1563 * by various controllers for chip configuration.
1564 */
1565
1566unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1567{
1568 /* Controller doesn't support IORDY. Probably a pointless check
1569 as the caller should know this */
1570 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1571 return 0;
 1572 /* For PIO3 and higher, IORDY is mandatory */
1573 if (adev->pio_mode > XFER_PIO_2)
1574 return 1;
1575 /* We turn it on when possible */
1576 if (ata_id_has_iordy(adev->id))
1577 return 1;
1578 return 0;
1579}
1580
1581/**
1582 * ata_pio_mask_no_iordy - Return the non IORDY mask
1583 * @adev: ATA device
1584 *
1585 * Compute the highest mode possible if we are not using iordy. Return
1586 * -1 if no iordy mode is available.
1587 */
1588
1589static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1590{
1591 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1592 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1593 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1594 /* Is the speed faster than the drive allows non IORDY ? */
1595 if (pio) {
1596 /* This is cycle times not frequency - watch the logic! */
1597 if (pio > 240) /* PIO2 is 240nS per cycle */
1598 return 3 << ATA_SHIFT_PIO;
1599 return 7 << ATA_SHIFT_PIO;
1600 }
1601 }
1602 return 3 << ATA_SHIFT_PIO;
1603}
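
/*
 * Worked example, illustrative only: 3 << ATA_SHIFT_PIO is the PIO0-1
 * mask and 7 << ATA_SHIFT_PIO is PIO0-2, so a drive whose non-IORDY
 * cycle time is 240ns or faster is allowed up to PIO2 here.
 */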
1604
1605/**
1606 * ata_dev_read_id - Read ID data from the specified device
1607 * @dev: target device
1608 * @p_class: pointer to class of the target device (may be changed)
1609 * @flags: ATA_READID_* flags
1610 * @id: buffer to read IDENTIFY data into
1611 *
1612 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1613 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1614 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1615 * for pre-ATA4 drives.
1616 *
1617 * LOCKING:
1618 * Kernel thread context (may sleep)
1619 *
1620 * RETURNS:
1621 * 0 on success, -errno otherwise.
1622 */
1623int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1624 unsigned int flags, u16 *id)
1625{
1626 struct ata_port *ap = dev->link->ap;
1627 unsigned int class = *p_class;
1628 struct ata_taskfile tf;
1629 unsigned int err_mask = 0;
1630 const char *reason;
1631 int may_fallback = 1, tried_spinup = 0;
1632 int rc;
1633
1634 if (ata_msg_ctl(ap))
1635 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1636
1637 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1638 retry:
1639 ata_tf_init(dev, &tf);
1640
1641 switch (class) {
1642 case ATA_DEV_ATA:
1643 tf.command = ATA_CMD_ID_ATA;
1644 break;
1645 case ATA_DEV_ATAPI:
1646 tf.command = ATA_CMD_ID_ATAPI;
1647 break;
1648 default:
1649 rc = -ENODEV;
1650 reason = "unsupported class";
1651 goto err_out;
1652 }
1653
1654 tf.protocol = ATA_PROT_PIO;
1655
1656 /* Some devices choke if TF registers contain garbage. Make
1657 * sure those are properly initialized.
1658 */
1659 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1660
1661 /* Device presence detection is unreliable on some
1662 * controllers. Always poll IDENTIFY if available.
1663 */
1664 tf.flags |= ATA_TFLAG_POLLING;
1665
1666 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1667 id, sizeof(id[0]) * ATA_ID_WORDS);
1668 if (err_mask) {
1669 if (err_mask & AC_ERR_NODEV_HINT) {
1670 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1671 ap->print_id, dev->devno);
1672 return -ENOENT;
1673 }
1674
1675 /* Device or controller might have reported the wrong
1676 * device class. Give a shot at the other IDENTIFY if
1677 * the current one is aborted by the device.
1678 */
1679 if (may_fallback &&
1680 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1681 may_fallback = 0;
1682
1683 if (class == ATA_DEV_ATA)
1684 class = ATA_DEV_ATAPI;
1685 else
1686 class = ATA_DEV_ATA;
1687 goto retry;
1688 }
1689
1690 rc = -EIO;
1691 reason = "I/O error";
1692 goto err_out;
1693 }
1694
1695 /* Falling back doesn't make sense if ID data was read
1696 * successfully at least once.
1697 */
1698 may_fallback = 0;
1699
1700 swap_buf_le16(id, ATA_ID_WORDS);
1701
1702 /* sanity check */
1703 rc = -EINVAL;
1704 reason = "device reports invalid type";
1705
1706 if (class == ATA_DEV_ATA) {
1707 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1708 goto err_out;
1709 } else {
1710 if (ata_id_is_ata(id))
1711 goto err_out;
1712 }
1713
1714 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1715 tried_spinup = 1;
1716 /*
1717 * Drive powered-up in standby mode, and requires a specific
1718 * SET_FEATURES spin-up subcommand before it will accept
1719 * anything other than the original IDENTIFY command.
1720 */
1721 ata_tf_init(dev, &tf);
1722 tf.command = ATA_CMD_SET_FEATURES;
1723 tf.feature = SETFEATURES_SPINUP;
1724 tf.protocol = ATA_PROT_NODATA;
1725 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1726 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1727 if (err_mask && id[2] != 0x738c) {
1728 rc = -EIO;
1729 reason = "SPINUP failed";
1730 goto err_out;
1731 }
1732 /*
1733 * If the drive initially returned incomplete IDENTIFY info,
1734 * we now must reissue the IDENTIFY command.
1735 */
1736 if (id[2] == 0x37c8)
1737 goto retry;
1738 }
1739
1740 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1741 /*
1742 * The exact sequence expected by certain pre-ATA4 drives is:
1743 * SRST RESET
1744 * IDENTIFY
1745 * INITIALIZE DEVICE PARAMETERS
1746 * anything else..
1747 * Some drives were very specific about that exact sequence.
1748 */
1749 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1750 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1751 if (err_mask) {
1752 rc = -EIO;
1753 reason = "INIT_DEV_PARAMS failed";
1754 goto err_out;
1755 }
1756
1757 /* current CHS translation info (id[53-58]) might be
1758 * changed. reread the identify device info.
1759 */
1760 flags &= ~ATA_READID_POSTRESET;
1761 goto retry;
1762 }
1763 }
1764
1765 *p_class = class;
1766
1767 return 0;
1768
1769 err_out:
1770 if (ata_msg_warn(ap))
1771 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1772 "(%s, err_mask=0x%x)\n", reason, err_mask);
1773 return rc;
1774}
1775
1776static inline u8 ata_dev_knobble(struct ata_device *dev)
1777{
1778 struct ata_port *ap = dev->link->ap;
1779 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1780}
1781
1782static void ata_dev_config_ncq(struct ata_device *dev,
1783 char *desc, size_t desc_sz)
1784{
1785 struct ata_port *ap = dev->link->ap;
1786 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1787
1788 if (!ata_id_has_ncq(dev->id)) {
1789 desc[0] = '\0';
1790 return;
1791 }
1792 if (dev->horkage & ATA_HORKAGE_NONCQ) {
1793 snprintf(desc, desc_sz, "NCQ (not used)");
1794 return;
1795 }
1796 if (ap->flags & ATA_FLAG_NCQ) {
1797 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1798 dev->flags |= ATA_DFLAG_NCQ;
1799 }
1800
1801 if (hdepth >= ddepth)
1802 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1803 else
1804 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1805}
1806
1807/**
1808 * ata_dev_configure - Configure the specified ATA/ATAPI device
1809 * @dev: Target device to configure
1810 *
1811 * Configure @dev according to @dev->id. Generic and low-level
1812 * driver specific fixups are also applied.
1813 *
1814 * LOCKING:
1815 * Kernel thread context (may sleep)
1816 *
1817 * RETURNS:
1818 * 0 on success, -errno otherwise
1819 */
1820int ata_dev_configure(struct ata_device *dev)
1821{
1822 struct ata_port *ap = dev->link->ap;
1823 struct ata_eh_context *ehc = &dev->link->eh_context;
1824 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1825 const u16 *id = dev->id;
1826 unsigned int xfer_mask;
1827 char revbuf[7]; /* XYZ-99\0 */
1828 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1829 char modelbuf[ATA_ID_PROD_LEN+1];
1830 int rc;
1831
1832 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1833 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1834 __FUNCTION__);
1835 return 0;
1836 }
1837
1838 if (ata_msg_probe(ap))
1839 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1840
1841 /* set horkage */
1842 dev->horkage |= ata_dev_blacklisted(dev);
1843
1844 /* let ACPI work its magic */
1845 rc = ata_acpi_on_devcfg(dev);
1846 if (rc)
1847 return rc;
1848
1849 /* print device capabilities */
1850 if (ata_msg_probe(ap))
1851 ata_dev_printk(dev, KERN_DEBUG,
1852 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1853 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1854 __FUNCTION__,
1855 id[49], id[82], id[83], id[84],
1856 id[85], id[86], id[87], id[88]);
1857
1858 /* initialize to-be-configured parameters */
1859 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1860 dev->max_sectors = 0;
1861 dev->cdb_len = 0;
1862 dev->n_sectors = 0;
1863 dev->cylinders = 0;
1864 dev->heads = 0;
1865 dev->sectors = 0;
1866
1867 /*
1868 * common ATA, ATAPI feature tests
1869 */
1870
1871 /* find max transfer mode; for printk only */
1872 xfer_mask = ata_id_xfermask(id);
1873
1874 if (ata_msg_probe(ap))
1875 ata_dump_id(id);
1876
1877 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1878 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1879 sizeof(fwrevbuf));
1880
1881 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1882 sizeof(modelbuf));
1883
1884 /* ATA-specific feature tests */
1885 if (dev->class == ATA_DEV_ATA) {
1886 if (ata_id_is_cfa(id)) {
1887 if (id[162] & 1) /* CPRM may make this media unusable */
1888 ata_dev_printk(dev, KERN_WARNING,
1889 "supports DRM functions and may "
1890 "not be fully accessable.\n");
1891 snprintf(revbuf, 7, "CFA");
1892 }
1893 else
1894 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1895
1896 dev->n_sectors = ata_id_n_sectors(id);
1897
1898 if (dev->id[59] & 0x100)
1899 dev->multi_count = dev->id[59] & 0xff;
1900
1901 if (ata_id_has_lba(id)) {
1902 const char *lba_desc;
1903 char ncq_desc[20];
1904
1905 lba_desc = "LBA";
1906 dev->flags |= ATA_DFLAG_LBA;
1907 if (ata_id_has_lba48(id)) {
1908 dev->flags |= ATA_DFLAG_LBA48;
1909 lba_desc = "LBA48";
1910
1911 if (dev->n_sectors >= (1UL << 28) &&
1912 ata_id_has_flush_ext(id))
1913 dev->flags |= ATA_DFLAG_FLUSH_EXT;
1914 }
1915
1916 if (!(dev->horkage & ATA_HORKAGE_BROKEN_HPA) &&
1917 ata_id_hpa_enabled(dev->id))
1918 dev->n_sectors = ata_hpa_resize(dev);
1919
1920 /* config NCQ */
1921 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1922
1923 /* print device info to dmesg */
1924 if (ata_msg_drv(ap) && print_info) {
1925 ata_dev_printk(dev, KERN_INFO,
1926 "%s: %s, %s, max %s\n",
1927 revbuf, modelbuf, fwrevbuf,
1928 ata_mode_string(xfer_mask));
1929 ata_dev_printk(dev, KERN_INFO,
1930 "%Lu sectors, multi %u: %s %s\n",
1931 (unsigned long long)dev->n_sectors,
1932 dev->multi_count, lba_desc, ncq_desc);
1933 }
1934 } else {
1935 /* CHS */
1936
1937 /* Default translation */
1938 dev->cylinders = id[1];
1939 dev->heads = id[3];
1940 dev->sectors = id[6];
1941
1942 if (ata_id_current_chs_valid(id)) {
1943 /* Current CHS translation is valid. */
1944 dev->cylinders = id[54];
1945 dev->heads = id[55];
1946 dev->sectors = id[56];
1947 }
1948
1949 /* print device info to dmesg */
1950 if (ata_msg_drv(ap) && print_info) {
1951 ata_dev_printk(dev, KERN_INFO,
1952 "%s: %s, %s, max %s\n",
1953 revbuf, modelbuf, fwrevbuf,
1954 ata_mode_string(xfer_mask));
1955 ata_dev_printk(dev, KERN_INFO,
1956 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1957 (unsigned long long)dev->n_sectors,
1958 dev->multi_count, dev->cylinders,
1959 dev->heads, dev->sectors);
1960 }
1961 }
1962
1963 dev->cdb_len = 16;
1964 }
1965
1966 /* ATAPI-specific feature tests */
1967 else if (dev->class == ATA_DEV_ATAPI) {
1968 char *cdb_intr_string = "";
1969
1970 rc = atapi_cdb_len(id);
1971 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1972 if (ata_msg_warn(ap))
1973 ata_dev_printk(dev, KERN_WARNING,
1974 "unsupported CDB len\n");
1975 rc = -EINVAL;
1976 goto err_out_nosup;
1977 }
1978 dev->cdb_len = (unsigned int) rc;
1979
1980 if (ata_id_cdb_intr(dev->id)) {
1981 dev->flags |= ATA_DFLAG_CDB_INTR;
1982 cdb_intr_string = ", CDB intr";
1983 }
1984
1985 /* print device info to dmesg */
1986 if (ata_msg_drv(ap) && print_info)
1987 ata_dev_printk(dev, KERN_INFO,
1988 "ATAPI: %s, %s, max %s%s\n",
1989 modelbuf, fwrevbuf,
1990 ata_mode_string(xfer_mask),
1991 cdb_intr_string);
1992 }
1993
1994 /* determine max_sectors */
1995 dev->max_sectors = ATA_MAX_SECTORS;
1996 if (dev->flags & ATA_DFLAG_LBA48)
1997 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1998
1999 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2000 /* Let the user know. We don't want to disallow opens for
2001 rescue purposes, or in case the vendor is just a blithering
2002 idiot */
2003 if (print_info) {
2004 ata_dev_printk(dev, KERN_WARNING,
2005"Drive reports diagnostics failure. This may indicate a drive\n");
2006 ata_dev_printk(dev, KERN_WARNING,
2007"fault or invalid emulation. Contact drive vendor for information.\n");
2008 }
2009 }
2010
2011 /* limit bridge transfers to udma5, 200 sectors */
2012 if (ata_dev_knobble(dev)) {
2013 if (ata_msg_drv(ap) && print_info)
2014 ata_dev_printk(dev, KERN_INFO,
2015 "applying bridge limits\n");
2016 dev->udma_mask &= ATA_UDMA5;
2017 dev->max_sectors = ATA_MAX_SECTORS;
2018 }
2019
2020 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2021 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2022 dev->max_sectors);
2023
2024 if (ap->ops->dev_config)
2025 ap->ops->dev_config(dev);
2026
2027 if (ata_msg_probe(ap))
2028 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2029 __FUNCTION__, ata_chk_status(ap));
2030 return 0;
2031
2032err_out_nosup:
2033 if (ata_msg_probe(ap))
2034 ata_dev_printk(dev, KERN_DEBUG,
2035 "%s: EXIT, err\n", __FUNCTION__);
2036 return rc;
2037}
2038
2039/**
2040 * ata_cable_40wire - return 40 wire cable type
2041 * @ap: port
2042 *
2043 * Helper method for drivers which want to hardwire 40 wire cable
2044 * detection.
2045 */
2046
2047int ata_cable_40wire(struct ata_port *ap)
2048{
2049 return ATA_CBL_PATA40;
2050}
2051
2052/**
2053 * ata_cable_80wire - return 80 wire cable type
2054 * @ap: port
2055 *
2056 * Helper method for drivers which want to hardwire 80 wire cable
2057 * detection.
2058 */
2059
2060int ata_cable_80wire(struct ata_port *ap)
2061{
2062 return ATA_CBL_PATA80;
2063}
2064
2065/**
2066 * ata_cable_unknown - return unknown PATA cable.
2067 * @ap: port
2068 *
2069 * Helper method for drivers which have no PATA cable detection.
2070 */
2071
2072int ata_cable_unknown(struct ata_port *ap)
2073{
2074 return ATA_CBL_PATA_UNK;
2075}
2076
2077/**
2078 * ata_cable_sata - return SATA cable type
2079 * @ap: port
2080 *
2081 * Helper method for drivers which have SATA cables
2082 */
2083
2084int ata_cable_sata(struct ata_port *ap)
2085{
2086 return ATA_CBL_SATA;
2087}
2088
2089/**
2090 * ata_bus_probe - Reset and probe ATA bus
2091 * @ap: Bus to probe
2092 *
2093 * Master ATA bus probing function. Initiates a hardware-dependent
2094 * bus reset, then attempts to identify any devices found on
2095 * the bus.
2096 *
2097 * LOCKING:
2098 * PCI/etc. bus probe sem.
2099 *
2100 * RETURNS:
2101 * Zero on success, negative errno otherwise.
2102 */
2103
2104int ata_bus_probe(struct ata_port *ap)
2105{
2106 unsigned int classes[ATA_MAX_DEVICES];
2107 int tries[ATA_MAX_DEVICES];
2108 int rc;
2109 struct ata_device *dev;
2110
2111 ata_port_probe(ap);
2112
2113 ata_link_for_each_dev(dev, &ap->link)
2114 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2115
2116 retry:
2117 /* reset and determine device classes */
2118 ap->ops->phy_reset(ap);
2119
2120 ata_link_for_each_dev(dev, &ap->link) {
2121 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2122 dev->class != ATA_DEV_UNKNOWN)
2123 classes[dev->devno] = dev->class;
2124 else
2125 classes[dev->devno] = ATA_DEV_NONE;
2126
2127 dev->class = ATA_DEV_UNKNOWN;
2128 }
2129
2130 ata_port_probe(ap);
2131
2132 /* after the reset the device state is PIO 0 and the controller
2133 state is undefined. Record the mode */
2134
2135 ata_link_for_each_dev(dev, &ap->link)
2136 dev->pio_mode = XFER_PIO_0;
2137
2138 /* read IDENTIFY page and configure devices. We have to do the identify
2139 specific sequence bass-ackwards so that PDIAG- is released by
2140 the slave device */
2141
2142 ata_link_for_each_dev(dev, &ap->link) {
2143 if (tries[dev->devno])
2144 dev->class = classes[dev->devno];
2145
2146 if (!ata_dev_enabled(dev))
2147 continue;
2148
2149 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2150 dev->id);
2151 if (rc)
2152 goto fail;
2153 }
2154
2155 /* Now ask for the cable type as PDIAG- should have been released */
2156 if (ap->ops->cable_detect)
2157 ap->cbl = ap->ops->cable_detect(ap);
2158
2159 /* After the identify sequence we can now set up the devices. We do
2160 this in the normal order so that the user doesn't get confused */
2161
2162 ata_link_for_each_dev(dev, &ap->link) {
2163 if (!ata_dev_enabled(dev))
2164 continue;
2165
2166 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2167 rc = ata_dev_configure(dev);
2168 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2169 if (rc)
2170 goto fail;
2171 }
2172
2173 /* configure transfer mode */
2174 rc = ata_set_mode(&ap->link, &dev);
2175 if (rc)
2176 goto fail;
2177
2178 ata_link_for_each_dev(dev, &ap->link)
2179 if (ata_dev_enabled(dev))
2180 return 0;
2181
2182 /* no device present, disable port */
2183 ata_port_disable(ap);
2184 ap->ops->port_disable(ap);
2185 return -ENODEV;
2186
2187 fail:
2188 tries[dev->devno]--;
2189
2190 switch (rc) {
2191 case -EINVAL:
2192 /* eeek, something went very wrong, give up */
2193 tries[dev->devno] = 0;
2194 break;
2195
2196 case -ENODEV:
2197 /* give it just one more chance */
2198 tries[dev->devno] = min(tries[dev->devno], 1);
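		/* fall through */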
2199 case -EIO:
2200 if (tries[dev->devno] == 1) {
2201 /* This is the last chance, better to slow
2202 * down than lose it.
2203 */
2204 sata_down_spd_limit(&ap->link);
2205 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2206 }
2207 }
2208
2209 if (!tries[dev->devno])
2210 ata_dev_disable(dev);
2211
2212 goto retry;
2213}
2214
2215/**
2216 * ata_port_probe - Mark port as enabled
2217 * @ap: Port for which we indicate enablement
2218 *
2219 * Modify @ap data structure such that the system
2220 * thinks that the entire port is enabled.
2221 *
2222 * LOCKING: host lock, or some other form of
2223 * serialization.
2224 */
2225
2226void ata_port_probe(struct ata_port *ap)
2227{
2228 ap->flags &= ~ATA_FLAG_DISABLED;
2229}
2230
2231/**
2232 * sata_print_link_status - Print SATA link status
2233 * @link: SATA link to printk link status about
2234 *
2235 * This function prints link speed and status of a SATA link.
2236 *
2237 * LOCKING:
2238 * None.
2239 */
2240void sata_print_link_status(struct ata_link *link)
2241{
2242 u32 sstatus, scontrol, tmp;
2243
2244 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2245 return;
2246 sata_scr_read(link, SCR_CONTROL, &scontrol);
2247
2248 if (ata_link_online(link)) {
2249 tmp = (sstatus >> 4) & 0xf;
2250 ata_link_printk(link, KERN_INFO,
2251 "SATA link up %s (SStatus %X SControl %X)\n",
2252 sata_spd_string(tmp), sstatus, scontrol);
2253 } else {
2254 ata_link_printk(link, KERN_INFO,
2255 "SATA link down (SStatus %X SControl %X)\n",
2256 sstatus, scontrol);
2257 }
2258}
2259
2260/**
2261 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2262 * @ap: SATA port associated with target SATA PHY.
2263 *
2264 * This function issues commands to standard SATA Sxxx
2265 * PHY registers, to wake up the phy (and device), and
2266 * clear any reset condition.
2267 *
2268 * LOCKING:
2269 * PCI/etc. bus probe sem.
2270 *
2271 */
2272void __sata_phy_reset(struct ata_port *ap)
2273{
2274 struct ata_link *link = &ap->link;
2275 unsigned long timeout = jiffies + (HZ * 5);
2276 u32 sstatus;
2277
2278 if (ap->flags & ATA_FLAG_SATA_RESET) {
2279 /* issue phy wake/reset */
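		/* SControl 0x301: IPM 0x3 (no partial/slumber), DET 0x1 (COMRESET) */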
2280 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
2281 /* Couldn't find anything in SATA I/II specs, but
2282 * AHCI-1.1 10.4.2 says at least 1 ms. */
2283 mdelay(1);
2284 }
2285 /* phy wake/clear reset */
2286 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
2287
2288 /* wait for phy to become ready, if necessary */
2289 do {
2290 msleep(200);
2291 sata_scr_read(link, SCR_STATUS, &sstatus);
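		/* DET == 1: device presence detected but Phy not yet established */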
2292 if ((sstatus & 0xf) != 1)
2293 break;
2294 } while (time_before(jiffies, timeout));
2295
2296 /* print link status */
2297 sata_print_link_status(link);
2298
2299 /* TODO: phy layer with polling, timeouts, etc. */
2300 if (!ata_link_offline(link))
2301 ata_port_probe(ap);
2302 else
2303 ata_port_disable(ap);
2304
2305 if (ap->flags & ATA_FLAG_DISABLED)
2306 return;
2307
2308 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2309 ata_port_disable(ap);
2310 return;
2311 }
2312
2313 ap->cbl = ATA_CBL_SATA;
2314}
2315
2316/**
2317 * sata_phy_reset - Reset SATA bus.
2318 * @ap: SATA port associated with target SATA PHY.
2319 *
2320 * This function resets the SATA bus, and then probes
2321 * the bus for devices.
2322 *
2323 * LOCKING:
2324 * PCI/etc. bus probe sem.
2325 *
2326 */
2327void sata_phy_reset(struct ata_port *ap)
2328{
2329 __sata_phy_reset(ap);
2330 if (ap->flags & ATA_FLAG_DISABLED)
2331 return;
2332 ata_bus_reset(ap);
2333}
2334
2335/**
2336 * ata_dev_pair - return other device on cable
2337 * @adev: device
2338 *
2339 * Obtain the other device on the same cable, or if none is
2340 * present NULL is returned
2341 */
2342
2343struct ata_device *ata_dev_pair(struct ata_device *adev)
2344{
2345 struct ata_link *link = adev->link;
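	/* devno is 0 or 1, so 1 - devno selects the other device */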
2346 struct ata_device *pair = &link->device[1 - adev->devno];
2347 if (!ata_dev_enabled(pair))
2348 return NULL;
2349 return pair;
2350}
2351
2352/**
2353 * ata_port_disable - Disable port.
2354 * @ap: Port to be disabled.
2355 *
2356 * Modify @ap data structure such that the system
2357 * thinks that the entire port is disabled, and should
2358 * never attempt to probe or communicate with devices
2359 * on this port.
2360 *
2361 * LOCKING: host lock, or some other form of
2362 * serialization.
2363 */
2364
2365void ata_port_disable(struct ata_port *ap)
2366{
2367 ap->link.device[0].class = ATA_DEV_NONE;
2368 ap->link.device[1].class = ATA_DEV_NONE;
2369 ap->flags |= ATA_FLAG_DISABLED;
2370}
2371
2372/**
2373 * sata_down_spd_limit - adjust SATA spd limit downward
2374 * @link: Link to adjust SATA spd limit for
2375 *
2376 * Adjust SATA spd limit of @link downward. Note that this
2377 * function only adjusts the limit. The change must be applied
2378 * using sata_set_spd().
2379 *
2380 * LOCKING:
2381 * Inherited from caller.
2382 *
2383 * RETURNS:
2384 * 0 on success, negative errno on failure
2385 */
2386int sata_down_spd_limit(struct ata_link *link)
2387{
2388 u32 sstatus, spd, mask;
2389 int rc, highbit;
2390
2391 if (!sata_scr_valid(link))
2392 return -EOPNOTSUPP;
2393
2394 /* If SCR can be read, use it to determine the current SPD.
2395 * If not, use cached value in link->sata_spd.
2396 */
2397 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2398 if (rc == 0)
2399 spd = (sstatus >> 4) & 0xf;
2400 else
2401 spd = link->sata_spd;
2402
2403 mask = link->sata_spd_limit;
2404 if (mask <= 1)
2405 return -EINVAL;
2406
2407 /* unconditionally mask off the highest bit */
2408 highbit = fls(mask) - 1;
2409 mask &= ~(1 << highbit);
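	/* e.g. mask 0x3 (1.5 and 3.0 Gbps allowed) becomes 0x1 (1.5 Gbps only) */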
2410
2411 /* Mask off all speeds higher than or equal to the current
2412 * one. Force 1.5Gbps if current SPD is not available.
2413 */
2414 if (spd > 1)
2415 mask &= (1 << (spd - 1)) - 1;
2416 else
2417 mask &= 1;
2418
2419 /* were we already at the bottom? */
2420 if (!mask)
2421 return -EINVAL;
2422
2423 link->sata_spd_limit = mask;
2424
2425 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2426 sata_spd_string(fls(mask)));
2427
2428 return 0;
2429}
2430
2431static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2432{
2433 u32 spd, limit;
2434
2435 if (link->sata_spd_limit == UINT_MAX)
2436 limit = 0;
2437 else
2438 limit = fls(link->sata_spd_limit);
2439
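	/* SPD limit lives in SControl bits 7:4; 0 means no speed restriction */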
2440 spd = (*scontrol >> 4) & 0xf;
2441 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2442
2443 return spd != limit;
2444}
2445
2446/**
2447 * sata_set_spd_needed - is SATA spd configuration needed
2448 * @link: Link in question
2449 *
2450 * Test whether the spd limit in SControl matches
2451 * @link->sata_spd_limit. This function is used to determine
2452 * whether hardreset is necessary to apply SATA spd
2453 * configuration.
2454 *
2455 * LOCKING:
2456 * Inherited from caller.
2457 *
2458 * RETURNS:
2459 * 1 if SATA spd configuration is needed, 0 otherwise.
2460 */
2461int sata_set_spd_needed(struct ata_link *link)
2462{
2463 u32 scontrol;
2464
2465 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2466 return 0;
2467
2468 return __sata_set_spd_needed(link, &scontrol);
2469}
2470
2471/**
2472 * sata_set_spd - set SATA spd according to spd limit
2473 * @link: Link to set SATA spd for
2474 *
2475 * Set SATA spd of @link according to sata_spd_limit.
2476 *
2477 * LOCKING:
2478 * Inherited from caller.
2479 *
2480 * RETURNS:
2481 * 0 if spd doesn't need to be changed, 1 if spd has been
2482 * changed. Negative errno if SCR registers are inaccessible.
2483 */
2484int sata_set_spd(struct ata_link *link)
2485{
2486 u32 scontrol;
2487 int rc;
2488
2489 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2490 return rc;
2491
2492 if (!__sata_set_spd_needed(link, &scontrol))
2493 return 0;
2494
2495 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2496 return rc;
2497
2498 return 1;
2499}
2500
2501/*
2502 * This mode timing computation functionality is ported over from
2503 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2504 */
2505/*
2506 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2507 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2508 * for UDMA6, which is currently supported only by Maxtor drives.
2509 *
2510 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2511 */
2512
2513static const struct ata_timing ata_timing[] = {
2514
2515 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2516 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2517 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2518 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2519
2520 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2521 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2522 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2523 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2524 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2525
2526/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2527
2528 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2529 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2530 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2531
2532 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2533 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2534 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2535
2536 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2537 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2538 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2539 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2540
2541 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2542 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2543 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2544
2545/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2546
2547 { 0xFF }
2548};
2549
2550#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2551#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
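/*
 * ENOUGH() rounds v up to a whole number of units; EZ() additionally
 * keeps 0 ("not specified") as 0. The table entries are in ns, so the
 * "* 1000" below scales them to match the finer-grained clock periods
 * T and UT, which callers supply on a picosecond scale.
 */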
2552
2553static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2554{
2555 q->setup = EZ(t->setup * 1000, T);
2556 q->act8b = EZ(t->act8b * 1000, T);
2557 q->rec8b = EZ(t->rec8b * 1000, T);
2558 q->cyc8b = EZ(t->cyc8b * 1000, T);
2559 q->active = EZ(t->active * 1000, T);
2560 q->recover = EZ(t->recover * 1000, T);
2561 q->cycle = EZ(t->cycle * 1000, T);
2562 q->udma = EZ(t->udma * 1000, UT);
2563}
2564
2565void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2566 struct ata_timing *m, unsigned int what)
2567{
2568 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2569 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2570 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2571 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2572 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2573 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2574 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2575 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2576}
2577
2578static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2579{
2580 const struct ata_timing *t;
2581
2582 for (t = ata_timing; t->mode != speed; t++)
2583 if (t->mode == 0xFF)
2584 return NULL;
2585 return t;
2586}
2587
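/*
 * Illustrative example: with a 33 MHz bus clock (T = 30000 ps), the
 * 120 ns PIO-4 cycle above quantizes to ENOUGH(120000, 30000) = 4
 * bus clocks.
 */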
2588int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2589 struct ata_timing *t, int T, int UT)
2590{
2591 const struct ata_timing *s;
2592 struct ata_timing p;
2593
2594 /*
2595 * Find the mode.
2596 */
2597
2598 if (!(s = ata_timing_find_mode(speed)))
2599 return -EINVAL;
2600
2601 memcpy(t, s, sizeof(*s));
2602
2603 /*
2604 * If the drive is an EIDE drive, it can tell us it needs extended
2605 * PIO/MW_DMA cycle timing.
2606 */
2607
2608 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2609 memset(&p, 0, sizeof(p));
2610 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2611 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2612 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2613 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2614 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2615 }
2616 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2617 }
2618
2619 /*
2620 * Convert the timing to bus clock counts.
2621 */
2622
2623 ata_timing_quantize(t, t, T, UT);
2624
2625 /*
2626 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2627 * S.M.A.R.T. and some other commands. We have to ensure that the
2628 * DMA cycle timing is no faster than the fastest PIO timing.
2629 */
2630
2631 if (speed > XFER_PIO_6) {
2632 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2633 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2634 }
2635
2636 /*
2637 * Lengthen active & recovery time so that cycle time is correct.
2638 */
2639
2640 if (t->act8b + t->rec8b < t->cyc8b) {
2641 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2642 t->rec8b = t->cyc8b - t->act8b;
2643 }
2644
2645 if (t->active + t->recover < t->cycle) {
2646 t->active += (t->cycle - (t->active + t->recover)) / 2;
2647 t->recover = t->cycle - t->active;
2648 }
2649
2650 /* In a few cases quantisation may produce enough error to
2651 leave t->cycle too low for the sum of active and recovery;
2652 if so, we must correct this */
2653 if (t->active + t->recover > t->cycle)
2654 t->cycle = t->active + t->recover;
2655
2656 return 0;
2657}
2658
2659/**
2660 * ata_down_xfermask_limit - adjust dev xfer masks downward
2661 * @dev: Device to adjust xfer masks
2662 * @sel: ATA_DNXFER_* selector
2663 *
2664 * Adjust xfer masks of @dev downward. Note that this function
2665 * does not apply the change. Invoking ata_set_mode() afterwards
2666 * will apply the limit.
2667 *
2668 * LOCKING:
2669 * Inherited from caller.
2670 *
2671 * RETURNS:
2672 * 0 on success, negative errno on failure
2673 */
2674int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2675{
2676 char buf[32];
2677 unsigned int orig_mask, xfer_mask;
2678 unsigned int pio_mask, mwdma_mask, udma_mask;
2679 int quiet, highbit;
2680
2681 quiet = !!(sel & ATA_DNXFER_QUIET);
2682 sel &= ~ATA_DNXFER_QUIET;
2683
2684 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2685 dev->mwdma_mask,
2686 dev->udma_mask);
2687 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2688
2689 switch (sel) {
2690 case ATA_DNXFER_PIO:
2691 highbit = fls(pio_mask) - 1;
2692 pio_mask &= ~(1 << highbit);
2693 break;
2694
2695 case ATA_DNXFER_DMA:
2696 if (udma_mask) {
2697 highbit = fls(udma_mask) - 1;
2698 udma_mask &= ~(1 << highbit);
2699 if (!udma_mask)
2700 return -ENOENT;
2701 } else if (mwdma_mask) {
2702 highbit = fls(mwdma_mask) - 1;
2703 mwdma_mask &= ~(1 << highbit);
2704 if (!mwdma_mask)
2705 return -ENOENT;
2706 }
2707 break;
2708
2709 case ATA_DNXFER_40C:
2710 udma_mask &= ATA_UDMA_MASK_40C;
2711 break;
2712
2713 case ATA_DNXFER_FORCE_PIO0:
2714 pio_mask &= 1;
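		/* fall through */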
2715 case ATA_DNXFER_FORCE_PIO:
2716 mwdma_mask = 0;
2717 udma_mask = 0;
2718 break;
2719
2720 default:
2721 BUG();
2722 }
2723
2724 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2725
2726 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2727 return -ENOENT;
2728
2729 if (!quiet) {
2730 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2731 snprintf(buf, sizeof(buf), "%s:%s",
2732 ata_mode_string(xfer_mask),
2733 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2734 else
2735 snprintf(buf, sizeof(buf), "%s",
2736 ata_mode_string(xfer_mask));
2737
2738 ata_dev_printk(dev, KERN_WARNING,
2739 "limiting speed to %s\n", buf);
2740 }
2741
2742 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2743 &dev->udma_mask);
2744
2745 return 0;
2746}
2747
2748static int ata_dev_set_mode(struct ata_device *dev)
2749{
2750 struct ata_eh_context *ehc = &dev->link->eh_context;
2751 unsigned int err_mask;
2752 int rc;
2753
2754 dev->flags &= ~ATA_DFLAG_PIO;
2755 if (dev->xfer_shift == ATA_SHIFT_PIO)
2756 dev->flags |= ATA_DFLAG_PIO;
2757
2758 err_mask = ata_dev_set_xfermode(dev);
2759 /* Old CFA may refuse this command, which is just fine */
2760 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2761 err_mask &= ~AC_ERR_DEV;
2762 /* Some very old devices and some bad newer ones fail any kind of
2763 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2764 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2765 dev->pio_mode <= XFER_PIO_2)
2766 err_mask &= ~AC_ERR_DEV;
2767 if (err_mask) {
2768 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2769 "(err_mask=0x%x)\n", err_mask);
2770 return -EIO;
2771 }
2772
2773 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2774 rc = ata_dev_revalidate(dev, 0);
2775 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2776 if (rc)
2777 return rc;
2778
2779 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2780 dev->xfer_shift, (int)dev->xfer_mode);
2781
2782 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2783 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2784 return 0;
2785}
2786
2787/**
2788 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2789 * @link: link on which timings will be programmed
2790 * @r_failed_dev: out parameter for failed device
2791 *
2792 * Standard implementation of the function used to tune and set
2793 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2794 * ata_dev_set_mode() fails, pointer to the failing device is
2795 * returned in @r_failed_dev.
2796 *
2797 * LOCKING:
2798 * PCI/etc. bus probe sem.
2799 *
2800 * RETURNS:
2801 * 0 on success, negative errno otherwise
2802 */
2803
2804int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2805{
2806 struct ata_port *ap = link->ap;
2807 struct ata_device *dev;
2808 int rc = 0, used_dma = 0, found = 0;
2809
2810 /* step 1: calculate xfer_mask */
2811 ata_link_for_each_dev(dev, link) {
2812 unsigned int pio_mask, dma_mask;
2813
2814 if (!ata_dev_enabled(dev))
2815 continue;
2816
2817 ata_dev_xfermask(dev);
2818
2819 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2820 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2821 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2822 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2823
2824 found = 1;
2825 if (dev->dma_mode)
2826 used_dma = 1;
2827 }
2828 if (!found)
2829 goto out;
2830
2831 /* step 2: always set host PIO timings */
2832 ata_link_for_each_dev(dev, link) {
2833 if (!ata_dev_enabled(dev))
2834 continue;
2835
2836 if (!dev->pio_mode) {
2837 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2838 rc = -EINVAL;
2839 goto out;
2840 }
2841
2842 dev->xfer_mode = dev->pio_mode;
2843 dev->xfer_shift = ATA_SHIFT_PIO;
2844 if (ap->ops->set_piomode)
2845 ap->ops->set_piomode(ap, dev);
2846 }
2847
2848 /* step 3: set host DMA timings */
2849 ata_link_for_each_dev(dev, link) {
2850 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2851 continue;
2852
2853 dev->xfer_mode = dev->dma_mode;
2854 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2855 if (ap->ops->set_dmamode)
2856 ap->ops->set_dmamode(ap, dev);
2857 }
2858
2859 /* step 4: update devices' xfer mode */
2860 ata_link_for_each_dev(dev, link) {
2861 /* don't update suspended devices' xfer mode */
2862 if (!ata_dev_enabled(dev))
2863 continue;
2864
2865 rc = ata_dev_set_mode(dev);
2866 if (rc)
2867 goto out;
2868 }
2869
2870 /* Record simplex status. If we selected DMA then the other
2871 * host channels are not permitted to do so.
2872 */
2873 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2874 ap->host->simplex_claimed = ap;
2875
2876 out:
2877 if (rc)
2878 *r_failed_dev = dev;
2879 return rc;
2880}
2881
2882/**
2883 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2884 * @link: link on which timings will be programmed
2885 * @r_failed_dev: out parameter for failed device
2886 *
2887 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2888 * ata_set_mode() fails, pointer to the failing device is
2889 * returned in @r_failed_dev.
2890 *
2891 * LOCKING:
2892 * PCI/etc. bus probe sem.
2893 *
2894 * RETURNS:
2895 * 0 on success, negative errno otherwise
2896 */
2897int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2898{
2899 struct ata_port *ap = link->ap;
2900
2901 /* has private set_mode? */
2902 if (ap->ops->set_mode)
2903 return ap->ops->set_mode(link, r_failed_dev);
2904 return ata_do_set_mode(link, r_failed_dev);
2905}
2906
2907/**
2908 * ata_tf_to_host - issue ATA taskfile to host controller
2909 * @ap: port to which command is being issued
2910 * @tf: ATA taskfile register set
2911 *
2912 * Issues ATA taskfile register set to ATA host controller,
2913 * with proper synchronization with interrupt handler and
2914 * other threads.
2915 *
2916 * LOCKING:
2917 * spin_lock_irqsave(host lock)
2918 */
2919
2920static inline void ata_tf_to_host(struct ata_port *ap,
2921 const struct ata_taskfile *tf)
2922{
2923 ap->ops->tf_load(ap, tf);
2924 ap->ops->exec_command(ap, tf);
2925}
2926
2927/**
2928 * ata_busy_sleep - sleep until BSY clears, or timeout
2929 * @ap: port containing status register to be polled
2930 * @tmout_pat: impatience timeout
2931 * @tmout: overall timeout
2932 *
2933 * Sleep until ATA Status register bit BSY clears,
2934 * or a timeout occurs.
2935 *
2936 * LOCKING:
2937 * Kernel thread context (may sleep).
2938 *
2939 * RETURNS:
2940 * 0 on success, -errno otherwise.
2941 */
2942int ata_busy_sleep(struct ata_port *ap,
2943 unsigned long tmout_pat, unsigned long tmout)
2944{
2945 unsigned long timer_start, timeout;
2946 u8 status;
2947
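	/* poll quietly until tmout_pat, then warn and keep polling until tmout */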
2948 status = ata_busy_wait(ap, ATA_BUSY, 300);
2949 timer_start = jiffies;
2950 timeout = timer_start + tmout_pat;
2951 while (status != 0xff && (status & ATA_BUSY) &&
2952 time_before(jiffies, timeout)) {
2953 msleep(50);
2954 status = ata_busy_wait(ap, ATA_BUSY, 3);
2955 }
2956
2957 if (status != 0xff && (status & ATA_BUSY))
2958 ata_port_printk(ap, KERN_WARNING,
2959 "port is slow to respond, please be patient "
2960 "(Status 0x%x)\n", status);
2961
2962 timeout = timer_start + tmout;
2963 while (status != 0xff && (status & ATA_BUSY) &&
2964 time_before(jiffies, timeout)) {
2965 msleep(50);
2966 status = ata_chk_status(ap);
2967 }
2968
2969 if (status == 0xff)
2970 return -ENODEV;
2971
2972 if (status & ATA_BUSY) {
2973 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2974 "(%lu secs, Status 0x%x)\n",
2975 tmout / HZ, status);
2976 return -EBUSY;
2977 }
2978
2979 return 0;
2980}
2981
2982/**
2983 * ata_wait_ready - sleep until BSY clears, or timeout
2984 * @ap: port containing status register to be polled
2985 * @deadline: deadline jiffies for the operation
2986 *
2987 * Sleep until ATA Status register bit BSY clears, or timeout
2988 * occurs.
2989 *
2990 * LOCKING:
2991 * Kernel thread context (may sleep).
2992 *
2993 * RETURNS:
2994 * 0 on success, -errno otherwise.
2995 */
2996int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
2997{
2998 unsigned long start = jiffies;
2999 int warned = 0;
3000
3001 while (1) {
3002 u8 status = ata_chk_status(ap);
3003 unsigned long now = jiffies;
3004
3005 if (!(status & ATA_BUSY))
3006 return 0;
3007 if (!ata_link_online(&ap->link) && status == 0xff)
3008 return -ENODEV;
3009 if (time_after(now, deadline))
3010 return -EBUSY;
3011
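		/* warn once if we have waited over 5 s and at least 3 s remain */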
3012 if (!warned && time_after(now, start + 5 * HZ) &&
3013 (deadline - now > 3 * HZ)) {
3014 ata_port_printk(ap, KERN_WARNING,
3015 "port is slow to respond, please be patient "
3016 "(Status 0x%x)\n", status);
3017 warned = 1;
3018 }
3019
3020 msleep(50);
3021 }
3022}
3023
3024static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3025 unsigned long deadline)
3026{
3027 struct ata_ioports *ioaddr = &ap->ioaddr;
3028 unsigned int dev0 = devmask & (1 << 0);
3029 unsigned int dev1 = devmask & (1 << 1);
3030 int rc, ret = 0;
3031
3032 /* if device 0 was found in ata_devchk, wait for its
3033 * BSY bit to clear
3034 */
3035 if (dev0) {
3036 rc = ata_wait_ready(ap, deadline);
3037 if (rc) {
3038 if (rc != -ENODEV)
3039 return rc;
3040 ret = rc;
3041 }
3042 }
3043
3044 /* if device 1 was found in ata_devchk, wait for register
3045 * access briefly, then wait for BSY to clear.
3046 */
3047 if (dev1) {
3048 int i;
3049
3050 ap->ops->dev_select(ap, 1);
3051
3052 /* Wait for register access. Some ATAPI devices fail
3053 * to set nsect/lbal after reset, so don't waste too
3054 * much time on it. We're gonna wait for !BSY anyway.
3055 */
3056 for (i = 0; i < 2; i++) {
3057 u8 nsect, lbal;
3058
3059 nsect = ioread8(ioaddr->nsect_addr);
3060 lbal = ioread8(ioaddr->lbal_addr);
3061 if ((nsect == 1) && (lbal == 1))
3062 break;
3063 msleep(50); /* give drive a breather */
3064 }
3065
3066 rc = ata_wait_ready(ap, deadline);
3067 if (rc) {
3068 if (rc != -ENODEV)
3069 return rc;
3070 ret = rc;
3071 }
3072 }
3073
3074 /* is all this really necessary? */
3075 ap->ops->dev_select(ap, 0);
3076 if (dev1)
3077 ap->ops->dev_select(ap, 1);
3078 if (dev0)
3079 ap->ops->dev_select(ap, 0);
3080
3081 return ret;
3082}
3083
3084static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3085 unsigned long deadline)
3086{
3087 struct ata_ioports *ioaddr = &ap->ioaddr;
3088
3089 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3090
3091 /* software reset. causes dev0 to be selected */
3092 iowrite8(ap->ctl, ioaddr->ctl_addr);
3093 udelay(20); /* FIXME: flush */
3094 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3095 udelay(20); /* FIXME: flush */
3096 iowrite8(ap->ctl, ioaddr->ctl_addr);
3097
3098 /* spec mandates ">= 2ms" before checking status.
3099 * We wait 150ms, because that was the magic delay used for
3100 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3101 * between when the ATA command register is written, and then
3102 * status is checked. Because waiting for "a while" before
3103 * checking status is fine, post SRST, we perform this magic
3104 * delay here as well.
3105 *
3106 * Old drivers/ide uses the 2 ms rule and then waits for ready
3107 */
3108 msleep(150);
3109
3110 /* Before we perform post reset processing we want to see if
3111 * the bus shows 0xFF because the odd clown forgets the D7
3112 * pulldown resistor.
3113 */
3114 if (ata_check_status(ap) == 0xFF)
3115 return -ENODEV;
3116
3117 return ata_bus_post_reset(ap, devmask, deadline);
3118}
3119
3120/**
3121 * ata_bus_reset - reset host port and associated ATA channel
3122 * @ap: port to reset
3123 *
3124 * This is typically the first time we actually start issuing
3125 * commands to the ATA channel. We wait for BSY to clear, then
3126 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3127 * result. Determine what devices, if any, are on the channel
3128 * by looking at the device 0/1 error register. Look at the signature
3129 * stored in each device's taskfile registers, to determine if
3130 * the device is ATA or ATAPI.
3131 *
3132 * LOCKING:
3133 * PCI/etc. bus probe sem.
3134 * Obtains host lock.
3135 *
3136 * SIDE EFFECTS:
3137 * Sets ATA_FLAG_DISABLED if bus reset fails.
3138 */
3139
3140void ata_bus_reset(struct ata_port *ap)
3141{
3142 struct ata_device *device = ap->link.device;
3143 struct ata_ioports *ioaddr = &ap->ioaddr;
3144 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3145 u8 err;
3146 unsigned int dev0, dev1 = 0, devmask = 0;
3147 int rc;
3148
3149 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3150
3151 /* determine if device 0/1 are present */
3152 if (ap->flags & ATA_FLAG_SATA_RESET)
3153 dev0 = 1;
3154 else {
3155 dev0 = ata_devchk(ap, 0);
3156 if (slave_possible)
3157 dev1 = ata_devchk(ap, 1);
3158 }
3159
3160 if (dev0)
3161 devmask |= (1 << 0);
3162 if (dev1)
3163 devmask |= (1 << 1);
3164
3165 /* select device 0 again */
3166 ap->ops->dev_select(ap, 0);
3167
3168 /* issue bus reset */
3169 if (ap->flags & ATA_FLAG_SRST) {
3170 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3171 if (rc && rc != -ENODEV)
3172 goto err_out;
3173 }
3174
3175 /*
3176 * determine by signature whether we have ATA or ATAPI devices
3177 */
3178 device[0].class = ata_dev_try_classify(ap, 0, &err);
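	/* diagnostic code 0x81: device 0 passed, device 1 failed - skip dev1 */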
3179 if ((slave_possible) && (err != 0x81))
3180 device[1].class = ata_dev_try_classify(ap, 1, &err);
3181
3182 /* is double-select really necessary? */
3183 if (device[1].class != ATA_DEV_NONE)
3184 ap->ops->dev_select(ap, 1);
3185 if (device[0].class != ATA_DEV_NONE)
3186 ap->ops->dev_select(ap, 0);
3187
3188 /* if no devices were detected, disable this port */
3189 if ((device[0].class == ATA_DEV_NONE) &&
3190 (device[1].class == ATA_DEV_NONE))
3191 goto err_out;
3192
3193 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3194 /* set up device control for ATA_FLAG_SATA_RESET */
3195 iowrite8(ap->ctl, ioaddr->ctl_addr);
3196 }
3197
3198 DPRINTK("EXIT\n");
3199 return;
3200
3201err_out:
3202 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3203 ap->ops->port_disable(ap);
3204
3205 DPRINTK("EXIT\n");
3206}
3207
3208/**
3209 * sata_link_debounce - debounce SATA phy status
3210 * @link: ATA link to debounce SATA phy status for
3211 * @params: timing parameters { interval, duration, timeout } in msec
3212 * @deadline: deadline jiffies for the operation
3213 *
3214 * Make sure SStatus of @link reaches stable state, determined by
3215 * holding the same value where DET is not 1 for @duration polled
3216 * every @interval, before @timeout. Timeout constrains the
3217 * beginning of the stable state. Because DET gets stuck at 1 on
3218 * some controllers after hot unplugging, this function waits
3219 * until timeout then returns 0 if DET is stable at 1.
3220 *
3221 * @timeout is further limited by @deadline. The sooner of the
3222 * two is used.
3223 *
3224 * LOCKING:
3225 * Kernel thread context (may sleep)
3226 *
3227 * RETURNS:
3228 * 0 on success, -errno on failure.
3229 */
3230int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3231 unsigned long deadline)
3232{
3233 unsigned long interval_msec = params[0];
3234 unsigned long duration = msecs_to_jiffies(params[1]);
3235 unsigned long last_jiffies, t;
3236 u32 last, cur;
3237 int rc;
3238
3239 t = jiffies + msecs_to_jiffies(params[2]);
3240 if (time_before(t, deadline))
3241 deadline = t;
3242
3243 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3244 return rc;
3245 cur &= 0xf;
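	/* low nibble of SStatus is the DET field */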
3246
3247 last = cur;
3248 last_jiffies = jiffies;
3249
3250 while (1) {
3251 msleep(interval_msec);
3252 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3253 return rc;
3254 cur &= 0xf;
3255
3256 /* DET stable? */
3257 if (cur == last) {
3258 if (cur == 1 && time_before(jiffies, deadline))
3259 continue;
3260 if (time_after(jiffies, last_jiffies + duration))
3261 return 0;
3262 continue;
3263 }
3264
3265 /* unstable, start over */
3266 last = cur;
3267 last_jiffies = jiffies;
3268
3269 /* Check deadline. If debouncing failed, return
3270 * -EPIPE to tell upper layer to lower link speed.
3271 */
3272 if (time_after(jiffies, deadline))
3273 return -EPIPE;
3274 }
3275}
3276
3277/**
3278 * sata_link_resume - resume SATA link
3279 * @link: ATA link to resume SATA
3280 * @params: timing parameters { interval, duration, timeout } in msec
3281 * @deadline: deadline jiffies for the operation
3282 *
3283 * Resume SATA phy @link and debounce it.
3284 *
3285 * LOCKING:
3286 * Kernel thread context (may sleep)
3287 *
3288 * RETURNS:
3289 * 0 on success, -errno on failure.
3290 */
3291int sata_link_resume(struct ata_link *link, const unsigned long *params,
3292 unsigned long deadline)
3293{
3294 u32 scontrol;
3295 int rc;
3296
3297 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3298 return rc;
3299
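	/* keep SPD bits; IPM 0x3, DET 0x0: no reset action requested */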
3300 scontrol = (scontrol & 0x0f0) | 0x300;
3301
3302 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3303 return rc;
3304
3305 /* Some PHYs react badly if SStatus is pounded immediately
3306 * after resuming. Delay 200ms before debouncing.
3307 */
3308 msleep(200);
3309
3310 return sata_link_debounce(link, params, deadline);
3311}
3312
3313/**
3314 * ata_std_prereset - prepare for reset
3315 * @link: ATA link to be reset
3316 * @deadline: deadline jiffies for the operation
3317 *
3318 * @link is about to be reset. Initialize it. Failure from
3319 * prereset makes libata abort whole reset sequence and give up
3320 * that port, so prereset should be best-effort. It does its
3321 * best to prepare for reset sequence but if things go wrong, it
3322 * should just whine, not fail.
3323 *
3324 * LOCKING:
3325 * Kernel thread context (may sleep)
3326 *
3327 * RETURNS:
3328 * 0 on success, -errno otherwise.
3329 */
3330int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3331{
3332 struct ata_port *ap = link->ap;
3333 struct ata_eh_context *ehc = &link->eh_context;
3334 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3335 int rc;
3336
3337 /* handle link resume */
3338 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3339 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3340 ehc->i.action |= ATA_EH_HARDRESET;
3341
3342 /* if we're about to do hardreset, nothing more to do */
3343 if (ehc->i.action & ATA_EH_HARDRESET)
3344 return 0;
3345
3346 /* if SATA, resume link */
3347 if (ap->flags & ATA_FLAG_SATA) {
3348 rc = sata_link_resume(link, timing, deadline);
3349 /* whine about phy resume failure but proceed */
3350 if (rc && rc != -EOPNOTSUPP)
3351 ata_link_printk(link, KERN_WARNING, "failed to resume "
3352 "link for reset (errno=%d)\n", rc);
3353 }
3354
3355 /* Wait for !BSY if the controller can wait for the first D2H
3356 * Reg FIS and we don't know that no device is attached.
3357 */
3358 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3359 rc = ata_wait_ready(ap, deadline);
3360 if (rc && rc != -ENODEV) {
3361 ata_link_printk(link, KERN_WARNING, "device not ready "
3362 "(errno=%d), forcing hardreset\n", rc);
3363 ehc->i.action |= ATA_EH_HARDRESET;
3364 }
3365 }
3366
3367 return 0;
3368}
3369
3370/**
3371 * ata_std_softreset - reset host port via ATA SRST
3372 * @link: ATA link to reset
3373 * @classes: resulting classes of attached devices
3374 * @deadline: deadline jiffies for the operation
3375 *
3376 * Reset host port using ATA SRST.
3377 *
3378 * LOCKING:
3379 * Kernel thread context (may sleep)
3380 *
3381 * RETURNS:
3382 * 0 on success, -errno otherwise.
3383 */
3384int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3385 unsigned long deadline)
3386{
3387 struct ata_port *ap = link->ap;
3388 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3389 unsigned int devmask = 0;
3390 int rc;
3391 u8 err;
3392
3393 DPRINTK("ENTER\n");
3394
3395 if (ata_link_offline(link)) {
3396 classes[0] = ATA_DEV_NONE;
3397 goto out;
3398 }
3399
3400 /* determine if device 0/1 are present */
3401 if (ata_devchk(ap, 0))
3402 devmask |= (1 << 0);
3403 if (slave_possible && ata_devchk(ap, 1))
3404 devmask |= (1 << 1);
3405
3406 /* select device 0 again */
3407 ap->ops->dev_select(ap, 0);
3408
3409 /* issue bus reset */
3410 DPRINTK("about to softreset, devmask=%x\n", devmask);
3411 rc = ata_bus_softreset(ap, devmask, deadline);
3412 /* if link is occupied, -ENODEV too is an error */
3413 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3414 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3415 return rc;
3416 }
3417
3418 /* determine by signature whether we have ATA or ATAPI devices */
3419 classes[0] = ata_dev_try_classify(ap, 0, &err);
3420 if (slave_possible && err != 0x81)
3421 classes[1] = ata_dev_try_classify(ap, 1, &err);
3422
3423 out:
3424 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3425 return 0;
3426}
3427
3428/**
3429 * sata_link_hardreset - reset link via SATA phy reset
3430 * @link: link to reset
3431 * @timing: timing parameters { interval, duration, timeout } in msec
3432 * @deadline: deadline jiffies for the operation
3433 *
3434 * SATA phy-reset @link using DET bits of SControl register.
3435 *
3436 * LOCKING:
3437 * Kernel thread context (may sleep)
3438 *
3439 * RETURNS:
3440 * 0 on success, -errno otherwise.
3441 */
3442int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3443 unsigned long deadline)
3444{
3445 u32 scontrol;
3446 int rc;
3447
3448 DPRINTK("ENTER\n");
3449
3450 if (sata_set_spd_needed(link)) {
3451 /* SATA spec says nothing about how to reconfigure
3452 * spd. To be on the safe side, turn off phy during
3453 * reconfiguration. This works for at least ICH7 AHCI
3454 * and Sil3124.
3455 */
3456 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3457 goto out;
3458
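		/* DET 0x4: take the interface offline while SPD is reprogrammed */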
3459 scontrol = (scontrol & 0x0f0) | 0x304;
3460
3461 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3462 goto out;
3463
3464 sata_set_spd(link);
3465 }
3466
3467 /* issue phy wake/reset */
3468 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3469 goto out;
3470
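	/* DET 0x1: issue COMRESET to (re)initialize the link */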
3471 scontrol = (scontrol & 0x0f0) | 0x301;
3472
3473 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3474 goto out;
3475
3476 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3477 * 10.4.2 says at least 1 ms.
3478 */
3479 msleep(1);
3480
3481 /* bring link back */
3482 rc = sata_link_resume(link, timing, deadline);
3483 out:
3484 DPRINTK("EXIT, rc=%d\n", rc);
3485 return rc;
3486}
3487
3488/**
3489 * sata_std_hardreset - reset host port via SATA phy reset
3490 * @link: link to reset
3491 * @class: resulting class of attached device
3492 * @deadline: deadline jiffies for the operation
3493 *
3494 * SATA phy-reset host port using DET bits of SControl register,
3495 * wait for !BSY and classify the attached device.
3496 *
3497 * LOCKING:
3498 * Kernel thread context (may sleep)
3499 *
3500 * RETURNS:
3501 * 0 on success, -errno otherwise.
3502 */
3503int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3504 unsigned long deadline)
3505{
3506 struct ata_port *ap = link->ap;
3507 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3508 int rc;
3509
3510 DPRINTK("ENTER\n");
3511
3512 /* do hardreset */
3513 rc = sata_link_hardreset(link, timing, deadline);
3514 if (rc) {
3515 ata_link_printk(link, KERN_ERR,
3516 "COMRESET failed (errno=%d)\n", rc);
3517 return rc;
3518 }
3519
3520 /* TODO: phy layer with polling, timeouts, etc. */
3521 if (ata_link_offline(link)) {
3522 *class = ATA_DEV_NONE;
3523 DPRINTK("EXIT, link offline\n");
3524 return 0;
3525 }
3526
3527 /* wait a while before checking status, see SRST for more info */
3528 msleep(150);
3529
3530 rc = ata_wait_ready(ap, deadline);
3531 /* link occupied, -ENODEV too is an error */
3532 if (rc) {
3533 ata_link_printk(link, KERN_ERR,
3534 "COMRESET failed (errno=%d)\n", rc);
3535 return rc;
3536 }
3537
3538 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3539
3540 *class = ata_dev_try_classify(ap, 0, NULL);
3541
3542 DPRINTK("EXIT, class=%u\n", *class);
3543 return 0;
3544}
3545
3546/**
3547 * ata_std_postreset - standard postreset callback
3548 * @link: the target ata_link
3549 * @classes: classes of attached devices
3550 *
3551 * This function is invoked after a successful reset. Note that
3552 * the device might have been reset more than once using
3553 * different reset methods before postreset is invoked.
3554 *
3555 * LOCKING:
3556 * Kernel thread context (may sleep)
3557 */
3558void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3559{
3560 struct ata_port *ap = link->ap;
3561 u32 serror;
3562
3563 DPRINTK("ENTER\n");
3564
3565 /* print link status */
3566 sata_print_link_status(link);
3567
3568 /* clear SError */
3569 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3570 sata_scr_write(link, SCR_ERROR, serror);
3571
3572 /* is double-select really necessary? */
3573 if (classes[0] != ATA_DEV_NONE)
3574 ap->ops->dev_select(ap, 1);
3575 if (classes[1] != ATA_DEV_NONE)
3576 ap->ops->dev_select(ap, 0);
3577
3578 /* bail out if no device is present */
3579 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3580 DPRINTK("EXIT, no device\n");
3581 return;
3582 }
3583
3584 /* set up device control */
3585 if (ap->ioaddr.ctl_addr)
3586 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3587
3588 DPRINTK("EXIT\n");
3589}
3590
3591/**
3592 * ata_dev_same_device - Determine whether new ID matches configured device
3593 * @dev: device to compare against
3594 * @new_class: class of the new device
3595 * @new_id: IDENTIFY page of the new device
3596 *
3597 * Compare @new_class and @new_id against @dev and determine
3598 * whether @dev is the device indicated by @new_class and
3599 * @new_id.
3600 *
3601 * LOCKING:
3602 * None.
3603 *
3604 * RETURNS:
3605 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3606 */
3607static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3608 const u16 *new_id)
3609{
3610 const u16 *old_id = dev->id;
3611 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3612 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3613
3614 if (dev->class != new_class) {
3615 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3616 dev->class, new_class);
3617 return 0;
3618 }
3619
3620 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3621 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3622 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3623 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3624
3625 if (strcmp(model[0], model[1])) {
3626 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3627 "'%s' != '%s'\n", model[0], model[1]);
3628 return 0;
3629 }
3630
3631 if (strcmp(serial[0], serial[1])) {
3632 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3633 "'%s' != '%s'\n", serial[0], serial[1]);
3634 return 0;
3635 }
3636
3637 return 1;
3638}
3639
3640/**
3641 * ata_dev_reread_id - Re-read IDENTIFY data
3642 * @dev: target ATA device
3643 * @readid_flags: read ID flags
3644 *
3645 * Re-read IDENTIFY page and make sure @dev is still attached to
3646 * the port.
3647 *
3648 * LOCKING:
3649 * Kernel thread context (may sleep)
3650 *
3651 * RETURNS:
3652 * 0 on success, negative errno otherwise
3653 */
3654int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3655{
3656 unsigned int class = dev->class;
3657 u16 *id = (void *)dev->link->ap->sector_buf;
3658 int rc;
3659
3660 /* read ID data */
3661 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3662 if (rc)
3663 return rc;
3664
3665 /* is the device still there? */
3666 if (!ata_dev_same_device(dev, class, id))
3667 return -ENODEV;
3668
3669 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3670 return 0;
3671}
3672
3673/**
3674 * ata_dev_revalidate - Revalidate ATA device
3675 * @dev: device to revalidate
3676 * @readid_flags: read ID flags
3677 *
3678 * Re-read IDENTIFY page, make sure @dev is still attached to the
3679 * port and reconfigure it according to the new IDENTIFY page.
3680 *
3681 * LOCKING:
3682 * Kernel thread context (may sleep)
3683 *
3684 * RETURNS:
3685 * 0 on success, negative errno otherwise
3686 */
3687int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3688{
3689 u64 n_sectors = dev->n_sectors;
3690 int rc;
3691
3692 if (!ata_dev_enabled(dev))
3693 return -ENODEV;
3694
3695 /* re-read ID */
3696 rc = ata_dev_reread_id(dev, readid_flags);
3697 if (rc)
3698 goto fail;
3699
3700 /* configure device according to the new ID */
3701 rc = ata_dev_configure(dev);
3702 if (rc)
3703 goto fail;
3704
3705 /* verify n_sectors hasn't changed */
3706 if (dev->class == ATA_DEV_ATA && n_sectors &&
3707 dev->n_sectors != n_sectors) {
3708 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3709 "%llu != %llu\n",
3710 (unsigned long long)n_sectors,
3711 (unsigned long long)dev->n_sectors);
3712
3713 /* restore original n_sectors */
3714 dev->n_sectors = n_sectors;
3715
3716 rc = -ENODEV;
3717 goto fail;
3718 }
3719
3720 return 0;
3721
3722 fail:
3723 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3724 return rc;
3725}
3726
3727struct ata_blacklist_entry {
3728 const char *model_num;
3729 const char *model_rev;
3730 unsigned long horkage;
3731};
3732
3733static const struct ata_blacklist_entry ata_device_blacklist [] = {
3734 /* Devices with DMA related problems under Linux */
3735 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3736 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3737 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3738 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3739 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3740 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3741 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3742 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3743 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3744 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3745 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3746 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3747 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3748 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3749 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3750 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3751 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3752 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3753 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3754 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3755 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3756 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3757 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3758 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3759 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3760 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3761 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3762 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3763 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3764 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3765 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
3766 { "IOMEGA ZIP 250 ATAPI Floppy",
3767 NULL, ATA_HORKAGE_NODMA },
3768
3769 /* Weird ATAPI devices */
3770 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3771
3772 /* Devices we expect to fail diagnostics */
3773
3774 /* Devices where NCQ should be avoided */
3775 /* NCQ is slow */
3776 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3777 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3778 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3779 /* NCQ is broken */
3780 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
3781 { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ },
3782 { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
3783 { "Maxtor 7B250S0", "BANC1B70", ATA_HORKAGE_NONCQ, },
3784 { "Maxtor 7B300S0", "BANC1B70", ATA_HORKAGE_NONCQ },
3785 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3786 { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
3787 ATA_HORKAGE_NONCQ },
3788 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3789 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
3790 /* Blacklist entries taken from Silicon Image 3124/3132
3791 Windows driver .inf file - also several Linux problem reports */
3792 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3793 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3794 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3795 /* Drives which do spurious command completion */
3796 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
3797 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
3798 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
3799 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3800 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3801 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3802 { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, },
3803 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
3804
3805 /* devices which puke on READ_NATIVE_MAX */
3806 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3807 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3808 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3809 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3810
3811 /* End Marker */
3812 { }
3813};
3814
3815static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3816{
3817 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3818 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3819 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3820
3821 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3822 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3823
3824 while (ad->model_num) {
3825 if (!strcmp(ad->model_num, model_num)) {
3826 if (ad->model_rev == NULL)
3827 return ad->horkage;
3828 if (!strcmp(ad->model_rev, model_rev))
3829 return ad->horkage;
3830 }
3831 ad++;
3832 }
3833 return 0;
3834}
3835
3836static int ata_dma_blacklisted(const struct ata_device *dev)
3837{
3838 /* We don't support polling DMA.
3839 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3840 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3841 */
3842 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
3843 (dev->flags & ATA_DFLAG_CDB_INTR))
3844 return 1;
3845 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
3846}
3847
3848/**
3849 * ata_dev_xfermask - Compute supported xfermask of the given device
3850 * @dev: Device to compute xfermask for
3851 *
3852 * Compute supported xfermask of @dev and store it in
3853 * dev->*_mask. This function is responsible for applying all
3854 * known limits including host controller limits, device
3855 * blacklist, etc...
3856 *
3857 * LOCKING:
3858 * None.
3859 */
3860static void ata_dev_xfermask(struct ata_device *dev)
3861{
3862 struct ata_link *link = dev->link;
3863 struct ata_port *ap = link->ap;
3864 struct ata_host *host = ap->host;
3865 unsigned long xfer_mask;
3866
3867 /* controller modes available */
3868 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3869 ap->mwdma_mask, ap->udma_mask);
3870
3871 /* drive modes available */
3872 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3873 dev->mwdma_mask, dev->udma_mask);
3874 xfer_mask &= ata_id_xfermask(dev->id);
3875
3876 /*
3877 * CFA Advanced TrueIDE timings are not allowed on a shared
3878 * cable
3879 */
3880 if (ata_dev_pair(dev)) {
3881 /* No PIO5 or PIO6 */
3882 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3883 /* No MWDMA3 or MWDMA 4 */
3884 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3885 }
3886
3887 if (ata_dma_blacklisted(dev)) {
3888 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3889 ata_dev_printk(dev, KERN_WARNING,
3890 "device is on DMA blacklist, disabling DMA\n");
3891 }
3892
3893 if ((host->flags & ATA_HOST_SIMPLEX) &&
3894 host->simplex_claimed && host->simplex_claimed != ap) {
3895 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3896 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3897 "other device, disabling DMA\n");
3898 }
3899
3900 if (ap->flags & ATA_FLAG_NO_IORDY)
3901 xfer_mask &= ata_pio_mask_no_iordy(dev);
3902
3903 if (ap->ops->mode_filter)
3904 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3905
3906 /* Apply cable rule here. Don't apply it early because when
3907 * we handle hot plug the cable type can itself change.
3908 * Check this last so that we know if the transfer rate was
3909 * solely limited by the cable.
3910 * Unknown or 80 wire cables reported host side are checked
3911 * drive side as well. Cases where we know a 40wire cable
3912 * is used safely for 80 are not checked here.
3913 */
3914 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3915 /* UDMA/44 or higher would be available */
3916 if ((ap->cbl == ATA_CBL_PATA40) ||
3917 (ata_drive_40wire(dev->id) &&
3918 (ap->cbl == ATA_CBL_PATA_UNK ||
3919 ap->cbl == ATA_CBL_PATA80))) {
3920 ata_dev_printk(dev, KERN_WARNING,
3921 "limited to UDMA/33 due to 40-wire cable\n");
3922 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3923 }
3924
3925 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3926 &dev->mwdma_mask, &dev->udma_mask);
3927}
3928
3929/**
3930 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3931 * @dev: Device to which command will be sent
3932 *
3933 * Issue SET FEATURES - XFER MODE command to device @dev
3934 * on port @ap.
3935 *
3936 * LOCKING:
3937 * PCI/etc. bus probe sem.
3938 *
3939 * RETURNS:
3940 * 0 on success, AC_ERR_* mask otherwise.
3941 */
3942
3943static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3944{
3945 struct ata_taskfile tf;
3946 unsigned int err_mask;
3947
3948 /* set up set-features taskfile */
3949 DPRINTK("set features - xfer mode\n");
3950
3951 /* Some controllers and ATAPI devices show flaky interrupt
3952 * behavior after setting xfer mode. Use polling instead.
3953 */
3954 ata_tf_init(dev, &tf);
3955 tf.command = ATA_CMD_SET_FEATURES;
3956 tf.feature = SETFEATURES_XFER;
3957 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
3958 tf.protocol = ATA_PROT_NODATA;
3959 tf.nsect = dev->xfer_mode;
3960
3961 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3962
3963 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3964 return err_mask;
3965}
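/*
 * Illustrative note (not from the original source): the mode value
 * placed in the sector count register follows the ATA SET FEATURES
 * transfer-mode encoding: 0x08 | n for PIO flow-control mode n,
 * 0x20 | n for MWDMA mode n, 0x40 | n for UDMA mode n. UDMA/100
 * (UDMA5), for instance, is requested with nsect = 0x45.
 */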
3966
3967/**
3968 * ata_dev_init_params - Issue INIT DEV PARAMS command
3969 * @dev: Device to which command will be sent
3970 * @heads: Number of heads (taskfile parameter)
3971 * @sectors: Number of sectors (taskfile parameter)
3972 *
3973 * LOCKING:
3974 * Kernel thread context (may sleep)
3975 *
3976 * RETURNS:
3977 * 0 on success, AC_ERR_* mask otherwise.
3978 */
3979static unsigned int ata_dev_init_params(struct ata_device *dev,
3980 u16 heads, u16 sectors)
3981{
3982 struct ata_taskfile tf;
3983 unsigned int err_mask;
3984
3985 /* Number of sectors per track 1-255. Number of heads 1-16 */
3986 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3987 return AC_ERR_INVALID;
3988
3989 /* set up init dev params taskfile */
3990 DPRINTK("init dev params\n");
3991
3992 ata_tf_init(dev, &tf);
3993 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3994 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3995 tf.protocol = ATA_PROT_NODATA;
3996 tf.nsect = sectors;
3997 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3998
3999 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4000 /* A clean abort indicates an original or just-out-of-spec drive
4001 and we should continue, as we issue the setup based on the
4002 drive-reported working geometry */
4003 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4004 err_mask = 0;
4005
4006 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4007 return err_mask;
4008}
4009
4010/**
4011 * ata_sg_clean - Unmap DMA memory associated with command
4012 * @qc: Command containing DMA memory to be released
4013 *
4014 * Unmap all mapped DMA memory associated with this command.
4015 *
4016 * LOCKING:
4017 * spin_lock_irqsave(host lock)
4018 */
4019void ata_sg_clean(struct ata_queued_cmd *qc)
4020{
4021 struct ata_port *ap = qc->ap;
4022 struct scatterlist *sg = qc->__sg;
4023 int dir = qc->dma_dir;
4024 void *pad_buf = NULL;
4025
4026 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4027 WARN_ON(sg == NULL);
4028
4029 if (qc->flags & ATA_QCFLAG_SINGLE)
4030 WARN_ON(qc->n_elem > 1);
4031
4032 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4033
4034 /* if we padded the buffer out to a 32-bit boundary, and data
4035 * xfer direction is from-device, we must copy from the
4036 * pad buffer back into the supplied buffer
4037 */
4038 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4039 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4040
4041 if (qc->flags & ATA_QCFLAG_SG) {
4042 if (qc->n_elem)
4043 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4044 /* restore last sg */
4045 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4046 if (pad_buf) {
4047 struct scatterlist *psg = &qc->pad_sgent;
4048 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4049 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4050 kunmap_atomic(addr, KM_IRQ0);
4051 }
4052 } else {
4053 if (qc->n_elem)
4054 dma_unmap_single(ap->dev,
4055 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4056 dir);
4057 /* restore sg */
4058 sg->length += qc->pad_len;
4059 if (pad_buf)
4060 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4061 pad_buf, qc->pad_len);
4062 }
4063
4064 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4065 qc->__sg = NULL;
4066}
4067
4068/**
4069 * ata_fill_sg - Fill PCI IDE PRD table
4070 * @qc: Metadata associated with taskfile to be transferred
4071 *
4072 * Fill PCI IDE PRD (scatter-gather) table with segments
4073 * associated with the current disk command.
4074 *
4075 * LOCKING:
4076 * spin_lock_irqsave(host lock)
4077 *
4078 */
4079static void ata_fill_sg(struct ata_queued_cmd *qc)
4080{
4081 struct ata_port *ap = qc->ap;
4082 struct scatterlist *sg;
4083 unsigned int idx;
4084
4085 WARN_ON(qc->__sg == NULL);
4086 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4087
4088 idx = 0;
4089 ata_for_each_sg(sg, qc) {
4090 u32 addr, offset;
4091 u32 sg_len, len;
4092
4093 /* determine if physical DMA addr spans 64K boundary.
4094 * Note h/w doesn't support 64-bit, so we unconditionally
4095 * truncate dma_addr_t to u32.
4096 */
4097 addr = (u32) sg_dma_address(sg);
4098 sg_len = sg_dma_len(sg);
4099
4100 while (sg_len) {
4101 offset = addr & 0xffff;
4102 len = sg_len;
4103 if ((offset + sg_len) > 0x10000)
4104 len = 0x10000 - offset;
4105
4106 ap->prd[idx].addr = cpu_to_le32(addr);
4107 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4108 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4109
4110 idx++;
4111 sg_len -= len;
4112 addr += len;
4113 }
4114 }
4115
4116 if (idx)
4117 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4118}
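/*
 * Worked example (illustrative only): a 10 KiB segment at bus address
 * 0x0000f000 crosses a 64K boundary, so the loop above emits two PRDs:
 *
 *	PRD[0] = (0x0000f000, 0x1000)		-- up to the boundary
 *	PRD[1] = (0x00010000, 0x1800) | EOT	-- the remaining 6 KiB
 */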
4119
4120/**
4121 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4122 * @qc: Metadata associated with taskfile to be transferred
4123 *
4124 * Fill PCI IDE PRD (scatter-gather) table with segments
4125 * associated with the current disk command. Perform the fill
4126 * so that we avoid writing any zero-length (meaning 64K) records
4127 * for controllers that don't follow the spec.
4128 *
4129 * LOCKING:
4130 * spin_lock_irqsave(host lock)
4131 *
4132 */
4133static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4134{
4135 struct ata_port *ap = qc->ap;
4136 struct scatterlist *sg;
4137 unsigned int idx;
4138
4139 WARN_ON(qc->__sg == NULL);
4140 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4141
4142 idx = 0;
4143 ata_for_each_sg(sg, qc) {
4144 u32 addr, offset;
4145 u32 sg_len, len, blen;
4146
4147 /* determine if physical DMA addr spans 64K boundary.
4148 * Note h/w doesn't support 64-bit, so we unconditionally
4149 * truncate dma_addr_t to u32.
4150 */
4151 addr = (u32) sg_dma_address(sg);
4152 sg_len = sg_dma_len(sg);
4153
4154 while (sg_len) {
4155 offset = addr & 0xffff;
4156 len = sg_len;
4157 if ((offset + sg_len) > 0x10000)
4158 len = 0x10000 - offset;
4159
4160 blen = len & 0xffff;
4161 ap->prd[idx].addr = cpu_to_le32(addr);
4162 if (blen == 0) {
4163 /* Some PATA chipsets like the CS5530 can't
4164 cope with 0x0000 meaning 64K as the spec says */
4165 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4166 blen = 0x8000;
4167 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4168 }
4169 ap->prd[idx].flags_len = cpu_to_le32(blen);
4170 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4171
4172 idx++;
4173 sg_len -= len;
4174 addr += len;
4175 }
4176 }
4177
4178 if (idx)
4179 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4180}
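/*
 * Worked example (illustrative only): the spec defines a length field
 * of 0x0000 as 64K. For a full 64 KiB segment the code above instead
 * emits two 32 KiB PRDs so a broken chip never sees a zero length:
 *
 *	PRD[0] = (addr,          0x8000)
 *	PRD[1] = (addr + 0x8000, 0x8000)
 */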
4181
4182/**
4183 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4184 * @qc: Metadata associated with taskfile to check
4185 *
4186 * Allow low-level driver to filter ATA PACKET commands, returning
4187 * a status indicating whether or not it is OK to use DMA for the
4188 * supplied PACKET command.
4189 *
4190 * LOCKING:
4191 * spin_lock_irqsave(host lock)
4192 *
4193 * RETURNS: 0 when ATAPI DMA can be used
4194 * nonzero otherwise
4195 */
4196int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4197{
4198 struct ata_port *ap = qc->ap;
4199
4200 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4201 * few ATAPI devices choke on such DMA requests.
4202 */
4203 if (unlikely(qc->nbytes & 15))
4204 return 1;
4205
4206 if (ap->ops->check_atapi_dma)
4207 return ap->ops->check_atapi_dma(qc);
4208
4209 return 0;
4210}
4211
4212/**
4213 * ata_qc_prep - Prepare taskfile for submission
4214 * @qc: Metadata associated with taskfile to be prepared
4215 *
4216 * Prepare ATA taskfile for submission.
4217 *
4218 * LOCKING:
4219 * spin_lock_irqsave(host lock)
4220 */
4221void ata_qc_prep(struct ata_queued_cmd *qc)
4222{
4223 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4224 return;
4225
4226 ata_fill_sg(qc);
4227}
4228
4229/**
4230 * ata_dumb_qc_prep - Prepare taskfile for submission
4231 * @qc: Metadata associated with taskfile to be prepared
4232 *
4233 * Prepare ATA taskfile for submission.
4234 *
4235 * LOCKING:
4236 * spin_lock_irqsave(host lock)
4237 */
4238void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4239{
4240 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4241 return;
4242
4243 ata_fill_sg_dumb(qc);
4244}
4245
4246void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4247
4248/**
4249 * ata_sg_init_one - Associate command with memory buffer
4250 * @qc: Command to be associated
4251 * @buf: Memory buffer
4252 * @buflen: Length of memory buffer, in bytes.
4253 *
4254 * Initialize the data-related elements of queued_cmd @qc
4255 * to point to a single memory buffer, @buf of byte length @buflen.
4256 *
4257 * LOCKING:
4258 * spin_lock_irqsave(host lock)
4259 */
4260
4261void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4262{
4263 qc->flags |= ATA_QCFLAG_SINGLE;
4264
4265 qc->__sg = &qc->sgent;
4266 qc->n_elem = 1;
4267 qc->orig_n_elem = 1;
4268 qc->buf_virt = buf;
4269 qc->nbytes = buflen;
4270
4271 sg_init_one(&qc->sgent, buf, buflen);
4272}
4273
4274/**
4275 * ata_sg_init - Associate command with scatter-gather table.
4276 * @qc: Command to be associated
4277 * @sg: Scatter-gather table.
4278 * @n_elem: Number of elements in s/g table.
4279 *
4280 * Initialize the data-related elements of queued_cmd @qc
4281 * to point to a scatter-gather table @sg, containing @n_elem
4282 * elements.
4283 *
4284 * LOCKING:
4285 * spin_lock_irqsave(host lock)
4286 */
4287
4288void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4289 unsigned int n_elem)
4290{
4291 qc->flags |= ATA_QCFLAG_SG;
4292 qc->__sg = sg;
4293 qc->n_elem = n_elem;
4294 qc->orig_n_elem = n_elem;
4295}
4296
4297/**
4298 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4299 * @qc: Command with memory buffer to be mapped.
4300 *
4301 * DMA-map the memory buffer associated with queued_cmd @qc.
4302 *
4303 * LOCKING:
4304 * spin_lock_irqsave(host lock)
4305 *
4306 * RETURNS:
4307 * Zero on success, negative on error.
4308 */
4309
4310static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4311{
4312 struct ata_port *ap = qc->ap;
4313 int dir = qc->dma_dir;
4314 struct scatterlist *sg = qc->__sg;
4315 dma_addr_t dma_address;
4316 int trim_sg = 0;
4317
4318 /* we must lengthen transfers to end on a 32-bit boundary */
4319 qc->pad_len = sg->length & 3;
4320 if (qc->pad_len) {
4321 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4322 struct scatterlist *psg = &qc->pad_sgent;
4323
4324 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4325
4326 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4327
4328 if (qc->tf.flags & ATA_TFLAG_WRITE)
4329 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4330 qc->pad_len);
4331
4332 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4333 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4334 /* trim sg */
4335 sg->length -= qc->pad_len;
4336 if (sg->length == 0)
4337 trim_sg = 1;
4338
4339 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4340 sg->length, qc->pad_len);
4341 }
4342
4343 if (trim_sg) {
4344 qc->n_elem--;
4345 goto skip_map;
4346 }
4347
4348 dma_address = dma_map_single(ap->dev, qc->buf_virt,
4349 sg->length, dir);
4350 if (dma_mapping_error(dma_address)) {
4351 /* restore sg */
4352 sg->length += qc->pad_len;
4353 return -1;
4354 }
4355
4356 sg_dma_address(sg) = dma_address;
4357 sg_dma_len(sg) = sg->length;
4358
4359skip_map:
4360 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4361 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4362
4363 return 0;
4364}
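/*
 * Worked example (illustrative only): for a 510-byte ATAPI buffer,
 * pad_len = 510 & 3 = 2. The main element is trimmed to 508 bytes and
 * the last two bytes travel via the per-tag ATA_DMA_PAD_SZ pad buffer,
 * keeping the overall DMA transfer a multiple of 32 bits.
 */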
4365
4366/**
4367 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4368 * @qc: Command with scatter-gather table to be mapped.
4369 *
4370 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4371 *
4372 * LOCKING:
4373 * spin_lock_irqsave(host lock)
4374 *
4375 * RETURNS:
4376 * Zero on success, negative on error.
4377 *
4378 */
4379
4380static int ata_sg_setup(struct ata_queued_cmd *qc)
4381{
4382 struct ata_port *ap = qc->ap;
4383 struct scatterlist *sg = qc->__sg;
4384 struct scatterlist *lsg = &sg[qc->n_elem - 1];
4385 int n_elem, pre_n_elem, dir, trim_sg = 0;
4386
4387 VPRINTK("ENTER, ata%u\n", ap->print_id);
4388 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4389
4390 /* we must lengthen transfers to end on a 32-bit boundary */
4391 qc->pad_len = lsg->length & 3;
4392 if (qc->pad_len) {
4393 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4394 struct scatterlist *psg = &qc->pad_sgent;
4395 unsigned int offset;
4396
4397 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4398
4399 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4400
4401 /*
4402 * psg->page/offset are used to copy to-be-written
4403 * data in this function or read data in ata_sg_clean.
4404 */
4405 offset = lsg->offset + lsg->length - qc->pad_len;
4406 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4407 psg->offset = offset_in_page(offset);
4408
4409 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4410 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4411 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4412 kunmap_atomic(addr, KM_IRQ0);
4413 }
4414
4415 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4416 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4417 /* trim last sg */
4418 lsg->length -= qc->pad_len;
4419 if (lsg->length == 0)
4420 trim_sg = 1;
4421
4422 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4423 qc->n_elem - 1, lsg->length, qc->pad_len);
4424 }
4425
4426 pre_n_elem = qc->n_elem;
4427 if (trim_sg && pre_n_elem)
4428 pre_n_elem--;
4429
4430 if (!pre_n_elem) {
4431 n_elem = 0;
4432 goto skip_map;
4433 }
4434
4435 dir = qc->dma_dir;
4436 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4437 if (n_elem < 1) {
4438 /* restore last sg */
4439 lsg->length += qc->pad_len;
4440 return -1;
4441 }
4442
4443 DPRINTK("%d sg elements mapped\n", n_elem);
4444
4445skip_map:
4446 qc->n_elem = n_elem;
4447
4448 return 0;
4449}
4450
4451/**
4452 * swap_buf_le16 - swap halves of 16-bit words in place
4453 * @buf: Buffer to swap
4454 * @buf_words: Number of 16-bit words in buffer.
4455 *
4456 * Swap halves of 16-bit words if needed to convert from
4457 * little-endian byte order to native cpu byte order, or
4458 * vice-versa.
4459 *
4460 * LOCKING:
4461 * Inherited from caller.
4462 */
4463void swap_buf_le16(u16 *buf, unsigned int buf_words)
4464{
4465#ifdef __BIG_ENDIAN
4466 unsigned int i;
4467
4468 for (i = 0; i < buf_words; i++)
4469 buf[i] = le16_to_cpu(buf[i]);
4470#endif /* __BIG_ENDIAN */
4471}
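/*
 * Illustrative note (not from the original source): IDENTIFY DEVICE
 * data is defined as a stream of little-endian 16-bit words, so on a
 * big-endian CPU each word must be byte-swapped before use; on a
 * little-endian CPU this helper compiles away to nothing.
 */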
4472
4473/**
4474 * ata_data_xfer - Transfer data by PIO
4475 * @adev: device to target
4476 * @buf: data buffer
4477 * @buflen: buffer length
4478 * @write_data: read/write
4479 *
4480 * Transfer data from/to the device data register by PIO.
4481 *
4482 * LOCKING:
4483 * Inherited from caller.
4484 */
4485void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4486 unsigned int buflen, int write_data)
4487{
4488 struct ata_port *ap = adev->link->ap;
4489 unsigned int words = buflen >> 1;
4490
4491 /* Transfer multiple of 2 bytes */
4492 if (write_data)
4493 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4494 else
4495 ioread16_rep(ap->ioaddr.data_addr, buf, words);
4496
4497 /* Transfer trailing 1 byte, if any. */
4498 if (unlikely(buflen & 0x01)) {
4499 u16 align_buf[1] = { 0 };
4500 unsigned char *trailing_buf = buf + buflen - 1;
4501
4502 if (write_data) {
4503 memcpy(align_buf, trailing_buf, 1);
4504 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4505 } else {
4506 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4507 memcpy(trailing_buf, align_buf, 1);
4508 }
4509 }
4510}
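/*
 * Illustrative note (not from the original source): the taskfile data
 * register is 16 bits wide, so a 513-byte transfer is performed as
 * 256 ioread16/iowrite16 repetitions plus one padded 16-bit access
 * for the trailing byte.
 */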
4511
4512/**
4513 * ata_data_xfer_noirq - Transfer data by PIO
4514 * @adev: device to target
4515 * @buf: data buffer
4516 * @buflen: buffer length
4517 * @write_data: read/write
4518 *
4519 * Transfer data from/to the device data register by PIO. Do the
4520 * transfer with interrupts disabled.
4521 *
4522 * LOCKING:
4523 * Inherited from caller.
4524 */
4525void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4526 unsigned int buflen, int write_data)
4527{
4528 unsigned long flags;
4529 local_irq_save(flags);
4530 ata_data_xfer(adev, buf, buflen, write_data);
4531 local_irq_restore(flags);
4532}
4533
4534
4535/**
4536 * ata_pio_sector - Transfer a sector of data.
4537 * @qc: Command in progress
4538 *
4539 * Transfer qc->sect_size bytes of data from/to the ATA device.
4540 *
4541 * LOCKING:
4542 * Inherited from caller.
4543 */
4544
4545static void ata_pio_sector(struct ata_queued_cmd *qc)
4546{
4547 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4548 struct scatterlist *sg = qc->__sg;
4549 struct ata_port *ap = qc->ap;
4550 struct page *page;
4551 unsigned int offset;
4552 unsigned char *buf;
4553
4554 if (qc->curbytes == qc->nbytes - qc->sect_size)
4555 ap->hsm_task_state = HSM_ST_LAST;
4556
4557 page = sg[qc->cursg].page;
4558 offset = sg[qc->cursg].offset + qc->cursg_ofs;
4559
4560 /* get the current page and offset */
4561 page = nth_page(page, (offset >> PAGE_SHIFT));
4562 offset %= PAGE_SIZE;
4563
4564 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4565
4566 if (PageHighMem(page)) {
4567 unsigned long flags;
4568
4569 /* FIXME: use a bounce buffer */
4570 local_irq_save(flags);
4571 buf = kmap_atomic(page, KM_IRQ0);
4572
4573 /* do the actual data transfer */
4574 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4575
4576 kunmap_atomic(buf, KM_IRQ0);
4577 local_irq_restore(flags);
4578 } else {
4579 buf = page_address(page);
4580 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4581 }
4582
4583 qc->curbytes += qc->sect_size;
4584 qc->cursg_ofs += qc->sect_size;
4585
4586 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4587 qc->cursg++;
4588 qc->cursg_ofs = 0;
4589 }
4590}
4591
4592/**
4593 * ata_pio_sectors - Transfer one or many sectors.
4594 * @qc: Command in progress
4595 *
4596 * Transfer one or many sectors of data from/to the
4597 * ATA device for the DRQ request.
4598 *
4599 * LOCKING:
4600 * Inherited from caller.
4601 */
4602
4603static void ata_pio_sectors(struct ata_queued_cmd *qc)
4604{
4605 if (is_multi_taskfile(&qc->tf)) {
4606 /* READ/WRITE MULTIPLE */
4607 unsigned int nsect;
4608
4609 WARN_ON(qc->dev->multi_count == 0);
4610
4611 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4612 qc->dev->multi_count);
4613 while (nsect--)
4614 ata_pio_sector(qc);
4615 } else
4616 ata_pio_sector(qc);
4617
4618 ata_altstatus(qc->ap); /* flush */
4619}
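/*
 * Worked example (illustrative only): for READ/WRITE MULTIPLE with
 * multi_count = 8, a 512-byte sector size and 2048 bytes left to move,
 * nsect = min(2048 / 512, 8) = 4, so a single DRQ event services four
 * sectors instead of one.
 */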
4620
4621/**
4622 * atapi_send_cdb - Write CDB bytes to hardware
4623 * @ap: Port to which ATAPI device is attached.
4624 * @qc: Taskfile currently active
4625 *
4626 * When the device has indicated its readiness to accept
4627 * a CDB, this function is called. Send the CDB.
4628 *
4629 * LOCKING:
4630 * caller.
4631 */
4632
4633static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4634{
4635 /* send SCSI cdb */
4636 DPRINTK("send cdb\n");
4637 WARN_ON(qc->dev->cdb_len < 12);
4638
4639 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4640 ata_altstatus(ap); /* flush */
4641
4642 switch (qc->tf.protocol) {
4643 case ATA_PROT_ATAPI:
4644 ap->hsm_task_state = HSM_ST;
4645 break;
4646 case ATA_PROT_ATAPI_NODATA:
4647 ap->hsm_task_state = HSM_ST_LAST;
4648 break;
4649 case ATA_PROT_ATAPI_DMA:
4650 ap->hsm_task_state = HSM_ST_LAST;
4651 /* initiate bmdma */
4652 ap->ops->bmdma_start(qc);
4653 break;
4654 }
4655}
4656
4657/**
4658 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4659 * @qc: Command in progress
4660 * @bytes: number of bytes
4661 *
4662 * Transfer data from/to the ATAPI device.
4663 *
4664 * LOCKING:
4665 * Inherited from caller.
4666 *
4667 */
4668
4669static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4670{
4671 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4672 struct scatterlist *sg = qc->__sg;
4673 struct ata_port *ap = qc->ap;
4674 struct page *page;
4675 unsigned char *buf;
4676 unsigned int offset, count;
4677
4678 if (qc->curbytes + bytes >= qc->nbytes)
4679 ap->hsm_task_state = HSM_ST_LAST;
4680
4681next_sg:
4682 if (unlikely(qc->cursg >= qc->n_elem)) {
4683 /*
4684 * The end of qc->sg is reached and the device expects
4685 * more data to transfer. In order not to overrun qc->sg
4686 * and to fulfill the length specified in the byte count register,
4687 * - for the read case, discard trailing data from the device
4688 * - for the write case, pad zero data to the device
4689 */
4690 u16 pad_buf[1] = { 0 };
4691 unsigned int words = bytes >> 1;
4692 unsigned int i;
4693
4694 if (words) /* warning if bytes > 1 */
4695 ata_dev_printk(qc->dev, KERN_WARNING,
4696 "%u bytes trailing data\n", bytes);
4697
4698 for (i = 0; i < words; i++)
4699 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4700
4701 ap->hsm_task_state = HSM_ST_LAST;
4702 return;
4703 }
4704
4705 sg = &qc->__sg[qc->cursg];
4706
4707 page = sg->page;
4708 offset = sg->offset + qc->cursg_ofs;
4709
4710 /* get the current page and offset */
4711 page = nth_page(page, (offset >> PAGE_SHIFT));
4712 offset %= PAGE_SIZE;
4713
4714 /* don't overrun current sg */
4715 count = min(sg->length - qc->cursg_ofs, bytes);
4716
4717 /* don't cross page boundaries */
4718 count = min(count, (unsigned int)PAGE_SIZE - offset);
4719
4720 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4721
4722 if (PageHighMem(page)) {
4723 unsigned long flags;
4724
4725 /* FIXME: use bounce buffer */
4726 local_irq_save(flags);
4727 buf = kmap_atomic(page, KM_IRQ0);
4728
4729 /* do the actual data transfer */
4730 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4731
4732 kunmap_atomic(buf, KM_IRQ0);
4733 local_irq_restore(flags);
4734 } else {
4735 buf = page_address(page);
4736 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4737 }
4738
4739 bytes -= count;
4740 qc->curbytes += count;
4741 qc->cursg_ofs += count;
4742
4743 if (qc->cursg_ofs == sg->length) {
4744 qc->cursg++;
4745 qc->cursg_ofs = 0;
4746 }
4747
4748 if (bytes)
4749 goto next_sg;
4750}
4751
4752/**
4753 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4754 * @qc: Command in progress
4755 *
4756 * Transfer data from/to the ATAPI device.
4757 *
4758 * LOCKING:
4759 * Inherited from caller.
4760 */
4761
4762static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4763{
4764 struct ata_port *ap = qc->ap;
4765 struct ata_device *dev = qc->dev;
4766 unsigned int ireason, bc_lo, bc_hi, bytes;
4767 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4768
4769 /* Abuse qc->result_tf for temp storage of intermediate TF
4770 * here to save some kernel stack usage.
4771 * For normal completion, qc->result_tf is not relevant. For
4772 * error, qc->result_tf is later overwritten by ata_qc_complete().
4773 * So, the correctness of qc->result_tf is not affected.
4774 */
4775 ap->ops->tf_read(ap, &qc->result_tf);
4776 ireason = qc->result_tf.nsect;
4777 bc_lo = qc->result_tf.lbam;
4778 bc_hi = qc->result_tf.lbah;
4779 bytes = (bc_hi << 8) | bc_lo;
4780
4781 /* shall be cleared to zero, indicating xfer of data */
4782 if (ireason & (1 << 0))
4783 goto err_out;
4784
4785 /* make sure transfer direction matches expected */
4786 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4787 if (do_write != i_write)
4788 goto err_out;
4789
4790 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4791
4792 __atapi_pio_bytes(qc, bytes);
4793 ata_altstatus(ap); /* flush */
4794
4795 return;
4796
4797err_out:
4798 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4799 qc->err_mask |= AC_ERR_HSM;
4800 ap->hsm_task_state = HSM_ST_ERR;
4801}
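/*
 * Illustrative note (not from the original source): in the ATAPI
 * interrupt reason register read above, bit 0 (CoD) set means a
 * command packet rather than data and bit 1 (IO) set means
 * device-to-host. Hence the checks: CoD must be 0 for a data phase,
 * and IO == 0 must match a host-to-device (write) transfer.
 */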
4802
4803/**
4804 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4805 * @ap: the target ata_port
4806 * @qc: qc in progress
4807 *
4808 * RETURNS:
4809 * 1 if ok in workqueue, 0 otherwise.
4810 */
4811
4812static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4813{
4814 if (qc->tf.flags & ATA_TFLAG_POLLING)
4815 return 1;
4816
4817 if (ap->hsm_task_state == HSM_ST_FIRST) {
4818 if (qc->tf.protocol == ATA_PROT_PIO &&
4819 (qc->tf.flags & ATA_TFLAG_WRITE))
4820 return 1;
4821
4822 if (is_atapi_taskfile(&qc->tf) &&
4823 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4824 return 1;
4825 }
4826
4827 return 0;
4828}
4829
4830/**
4831 * ata_hsm_qc_complete - finish a qc running on standard HSM
4832 * @qc: Command to complete
4833 * @in_wq: 1 if called from workqueue, 0 otherwise
4834 *
4835 * Finish @qc which is running on standard HSM.
4836 *
4837 * LOCKING:
4838 * If @in_wq is zero, spin_lock_irqsave(host lock).
4839 * Otherwise, none on entry and grabs host lock.
4840 */
4841static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4842{
4843 struct ata_port *ap = qc->ap;
4844 unsigned long flags;
4845
4846 if (ap->ops->error_handler) {
4847 if (in_wq) {
4848 spin_lock_irqsave(ap->lock, flags);
4849
4850 /* EH might have kicked in while host lock is
4851 * released.
4852 */
4853 qc = ata_qc_from_tag(ap, qc->tag);
4854 if (qc) {
4855 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4856 ap->ops->irq_on(ap);
4857 ata_qc_complete(qc);
4858 } else
4859 ata_port_freeze(ap);
4860 }
4861
4862 spin_unlock_irqrestore(ap->lock, flags);
4863 } else {
4864 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4865 ata_qc_complete(qc);
4866 else
4867 ata_port_freeze(ap);
4868 }
4869 } else {
4870 if (in_wq) {
4871 spin_lock_irqsave(ap->lock, flags);
4872 ap->ops->irq_on(ap);
4873 ata_qc_complete(qc);
4874 spin_unlock_irqrestore(ap->lock, flags);
4875 } else
4876 ata_qc_complete(qc);
4877 }
4878}
4879
4880/**
4881 * ata_hsm_move - move the HSM to the next state.
4882 * @ap: the target ata_port
4883 * @qc: qc in progress
4884 * @status: current device status
4885 * @in_wq: 1 if called from workqueue, 0 otherwise
4886 *
4887 * RETURNS:
4888 * 1 when poll next status needed, 0 otherwise.
4889 */
4890int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4891 u8 status, int in_wq)
4892{
4893 unsigned long flags = 0;
4894 int poll_next;
4895
4896 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4897
4898 /* Make sure ata_qc_issue_prot() does not throw things
4899 * like DMA polling into the workqueue. Notice that
4900 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4901 */
4902 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4903
4904fsm_start:
4905 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4906 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4907
4908 switch (ap->hsm_task_state) {
4909 case HSM_ST_FIRST:
4910 /* Send first data block or PACKET CDB */
4911
4912 /* If polling, we will stay in the work queue after
4913 * sending the data. Otherwise, interrupt handler
4914 * takes over after sending the data.
4915 */
4916 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4917
4918 /* check device status */
4919 if (unlikely((status & ATA_DRQ) == 0)) {
4920 /* handle BSY=0, DRQ=0 as error */
4921 if (likely(status & (ATA_ERR | ATA_DF)))
4922 /* device stops HSM for abort/error */
4923 qc->err_mask |= AC_ERR_DEV;
4924 else
4925 /* HSM violation. Let EH handle this */
4926 qc->err_mask |= AC_ERR_HSM;
4927
4928 ap->hsm_task_state = HSM_ST_ERR;
4929 goto fsm_start;
4930 }
4931
4932 /* Device should not ask for data transfer (DRQ=1)
4933 * when it finds something wrong.
4934 * We ignore DRQ here and stop the HSM by
4935 * changing hsm_task_state to HSM_ST_ERR and
4936 * let the EH abort the command or reset the device.
4937 */
4938 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4939 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4940 "error, dev_stat 0x%X\n", status);
4941 qc->err_mask |= AC_ERR_HSM;
4942 ap->hsm_task_state = HSM_ST_ERR;
4943 goto fsm_start;
4944 }
4945
4946 /* Send the CDB (atapi) or the first data block (ata pio out).
4947 * During the state transition, interrupt handler shouldn't
4948 * be invoked before the data transfer is complete and
4949 * hsm_task_state is changed. Hence, the following locking.
4950 */
4951 if (in_wq)
4952 spin_lock_irqsave(ap->lock, flags);
4953
4954 if (qc->tf.protocol == ATA_PROT_PIO) {
4955 /* PIO data out protocol.
4956 * send first data block.
4957 */
4958
4959 /* ata_pio_sectors() might change the state
4960 * to HSM_ST_LAST. so, the state is changed here
4961 * before ata_pio_sectors().
4962 */
4963 ap->hsm_task_state = HSM_ST;
4964 ata_pio_sectors(qc);
4965 } else
4966 /* send CDB */
4967 atapi_send_cdb(ap, qc);
4968
4969 if (in_wq)
4970 spin_unlock_irqrestore(ap->lock, flags);
4971
4972 /* if polling, ata_pio_task() handles the rest.
4973 * otherwise, interrupt handler takes over from here.
4974 */
4975 break;
4976
4977 case HSM_ST:
4978 /* complete command or read/write the data register */
4979 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4980 /* ATAPI PIO protocol */
4981 if ((status & ATA_DRQ) == 0) {
4982 /* No more data to transfer or device error.
4983 * Device error will be tagged in HSM_ST_LAST.
4984 */
4985 ap->hsm_task_state = HSM_ST_LAST;
4986 goto fsm_start;
4987 }
4988
4989 /* Device should not ask for data transfer (DRQ=1)
4990 * when it finds something wrong.
4991 * We ignore DRQ here and stop the HSM by
4992 * changing hsm_task_state to HSM_ST_ERR and
4993 * let the EH abort the command or reset the device.
4994 */
4995 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4996 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4997 "device error, dev_stat 0x%X\n",
4998 status);
4999 qc->err_mask |= AC_ERR_HSM;
5000 ap->hsm_task_state = HSM_ST_ERR;
5001 goto fsm_start;
5002 }
5003
5004 atapi_pio_bytes(qc);
5005
5006 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5007 /* bad ireason reported by device */
5008 goto fsm_start;
5009
5010 } else {
5011 /* ATA PIO protocol */
5012 if (unlikely((status & ATA_DRQ) == 0)) {
5013 /* handle BSY=0, DRQ=0 as error */
5014 if (likely(status & (ATA_ERR | ATA_DF)))
5015 /* device stops HSM for abort/error */
5016 qc->err_mask |= AC_ERR_DEV;
5017 else
5018 /* HSM violation. Let EH handle this.
5019 * Phantom devices also trigger this
5020 * condition. Mark hint.
5021 */
5022 qc->err_mask |= AC_ERR_HSM |
5023 AC_ERR_NODEV_HINT;
5024
5025 ap->hsm_task_state = HSM_ST_ERR;
5026 goto fsm_start;
5027 }
5028
5029 /* For PIO reads, some devices may ask for
5030 * data transfer (DRQ=1) along with ERR=1.
5031 * We respect DRQ here and transfer one
5032 * block of junk data before changing the
5033 * hsm_task_state to HSM_ST_ERR.
5034 *
5035 * For PIO writes, ERR=1 DRQ=1 doesn't make
5036 * sense since the data block has been
5037 * transferred to the device.
5038 */
5039 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5040 /* data might be corrupted */
5041 qc->err_mask |= AC_ERR_DEV;
5042
5043 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5044 ata_pio_sectors(qc);
5045 status = ata_wait_idle(ap);
5046 }
5047
5048 if (status & (ATA_BUSY | ATA_DRQ))
5049 qc->err_mask |= AC_ERR_HSM;
5050
5051 /* ata_pio_sectors() might change the
5052 * state to HSM_ST_LAST. so, the state
5053 * is changed after ata_pio_sectors().
5054 */
5055 ap->hsm_task_state = HSM_ST_ERR;
5056 goto fsm_start;
5057 }
5058
5059 ata_pio_sectors(qc);
5060
5061 if (ap->hsm_task_state == HSM_ST_LAST &&
5062 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5063 /* all data read */
5064 status = ata_wait_idle(ap);
5065 goto fsm_start;
5066 }
5067 }
5068
5069 poll_next = 1;
5070 break;
5071
5072 case HSM_ST_LAST:
5073 if (unlikely(!ata_ok(status))) {
5074 qc->err_mask |= __ac_err_mask(status);
5075 ap->hsm_task_state = HSM_ST_ERR;
5076 goto fsm_start;
5077 }
5078
5079 /* no more data to transfer */
5080 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5081 ap->print_id, qc->dev->devno, status);
5082
5083 WARN_ON(qc->err_mask);
5084
5085 ap->hsm_task_state = HSM_ST_IDLE;
5086
5087 /* complete taskfile transaction */
5088 ata_hsm_qc_complete(qc, in_wq);
5089
5090 poll_next = 0;
5091 break;
5092
5093 case HSM_ST_ERR:
5094 /* make sure qc->err_mask is available to
5095 * know what's wrong and recover
5096 */
5097 WARN_ON(qc->err_mask == 0);
5098
5099 ap->hsm_task_state = HSM_ST_IDLE;
5100
5101 /* complete taskfile transaction */
5102 ata_hsm_qc_complete(qc, in_wq);
5103
5104 poll_next = 0;
5105 break;
5106 default:
5107 poll_next = 0;
5108 BUG();
5109 }
5110
5111 return poll_next;
5112}
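/*
 * Illustrative summary (not from the original source) of the happy
 * paths through the state machine above:
 *
 *	PIO data-in:	HSM_ST -> ... -> HSM_ST_LAST -> HSM_ST_IDLE
 *	PIO data-out:	HSM_ST_FIRST -> HSM_ST -> ... -> HSM_ST_LAST
 *			-> HSM_ST_IDLE
 *	ATAPI:		HSM_ST_FIRST (send CDB) -> HSM_ST or HSM_ST_LAST
 *			-> HSM_ST_IDLE
 *
 * Any failure routes through HSM_ST_ERR before settling in
 * HSM_ST_IDLE.
 */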
5113
5114static void ata_pio_task(struct work_struct *work)
5115{
5116 struct ata_port *ap =
5117 container_of(work, struct ata_port, port_task.work);
5118 struct ata_queued_cmd *qc = ap->port_task_data;
5119 u8 status;
5120 int poll_next;
5121
5122fsm_start:
5123 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5124
5125 /*
5126 * This is purely heuristic. This is a fast path.
5127 * Sometimes when we enter, BSY will be cleared in
5128 * a chk-status or two. If not, the drive is probably seeking
5129 * or something. Snooze for a couple msecs, then
5130 * chk-status again. If still busy, queue delayed work.
5131 */
5132 status = ata_busy_wait(ap, ATA_BUSY, 5);
5133 if (status & ATA_BUSY) {
5134 msleep(2);
5135 status = ata_busy_wait(ap, ATA_BUSY, 10);
5136 if (status & ATA_BUSY) {
5137 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5138 return;
5139 }
5140 }
5141
5142 /* move the HSM */
5143 poll_next = ata_hsm_move(ap, qc, status, 1);
5144
5145 /* another command or interrupt handler
5146 * may be running at this point.
5147 */
5148 if (poll_next)
5149 goto fsm_start;
5150}
5151
5152/**
5153 * ata_qc_new - Request an available ATA command, for queueing
5154 * @ap: Port associated with device @dev
5155 * @dev: Device from whom we request an available command structure
5156 *
5157 * LOCKING:
5158 * None.
5159 */
5160
5161static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5162{
5163 struct ata_queued_cmd *qc = NULL;
5164 unsigned int i;
5165
5166 /* no command while frozen */
5167 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5168 return NULL;
5169
5170 /* the last tag is reserved for internal command. */
5171 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5172 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5173 qc = __ata_qc_from_tag(ap, i);
5174 break;
5175 }
5176
5177 if (qc)
5178 qc->tag = i;
5179
5180 return qc;
5181}
5182
5183/**
5184 * ata_qc_new_init - Request an available ATA command, and initialize it
5185 * @dev: Device from whom we request an available command structure
5186 *
5187 * LOCKING:
5188 * None.
5189 */
5190
5191struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5192{
5193 struct ata_port *ap = dev->link->ap;
5194 struct ata_queued_cmd *qc;
5195
5196 qc = ata_qc_new(ap);
5197 if (qc) {
5198 qc->scsicmd = NULL;
5199 qc->ap = ap;
5200 qc->dev = dev;
5201
5202 ata_qc_reinit(qc);
5203 }
5204
5205 return qc;
5206}
5207
5208/**
5209 * ata_qc_free - free unused ata_queued_cmd
5210 * @qc: Command to complete
5211 *
5212 * Designed to free unused ata_queued_cmd object
5213 * in case something prevents using it.
5214 *
5215 * LOCKING:
5216 * spin_lock_irqsave(host lock)
5217 */
5218void ata_qc_free(struct ata_queued_cmd *qc)
5219{
5220 struct ata_port *ap = qc->ap;
5221 unsigned int tag;
5222
5223 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5224
5225 qc->flags = 0;
5226 tag = qc->tag;
5227 if (likely(ata_tag_valid(tag))) {
5228 qc->tag = ATA_TAG_POISON;
5229 clear_bit(tag, &ap->qc_allocated);
5230 }
5231}
5232
5233void __ata_qc_complete(struct ata_queued_cmd *qc)
5234{
5235 struct ata_port *ap = qc->ap;
5236 struct ata_link *link = qc->dev->link;
5237
5238 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5239 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5240
5241 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5242 ata_sg_clean(qc);
5243
5244 /* command should be marked inactive atomically with qc completion */
5245 if (qc->tf.protocol == ATA_PROT_NCQ)
5246 link->sactive &= ~(1 << qc->tag);
5247 else
5248 link->active_tag = ATA_TAG_POISON;
5249
5250 /* atapi: mark qc as inactive to prevent the interrupt handler
5251 * from completing the command twice later, before the error handler
5252 * is called. (when rc != 0 and atapi request sense is needed)
5253 */
5254 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5255 ap->qc_active &= ~(1 << qc->tag);
5256
5257 /* call completion callback */
5258 qc->complete_fn(qc);
5259}
5260
5261static void fill_result_tf(struct ata_queued_cmd *qc)
5262{
5263 struct ata_port *ap = qc->ap;
5264
5265 qc->result_tf.flags = qc->tf.flags;
5266 ap->ops->tf_read(ap, &qc->result_tf);
5267}
5268
5269/**
5270 * ata_qc_complete - Complete an active ATA command
5271 * @qc: Command to complete
5273 *
5274 * Indicate to the mid and upper layers that an ATA
5275 * command has completed, with either an ok or not-ok status.
5276 *
5277 * LOCKING:
5278 * spin_lock_irqsave(host lock)
5279 */
5280void ata_qc_complete(struct ata_queued_cmd *qc)
5281{
5282 struct ata_port *ap = qc->ap;
5283
5284 /* XXX: New EH and old EH use different mechanisms to
5285 * synchronize EH with regular execution path.
5286 *
5287 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5288 * Normal execution path is responsible for not accessing a
5289 * failed qc. libata core enforces the rule by returning NULL
5290 * from ata_qc_from_tag() for failed qcs.
5291 *
5292 * Old EH depends on ata_qc_complete() nullifying completion
5293 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5294 * not synchronize with interrupt handler. Only PIO task is
5295 * taken care of.
5296 */
5297 if (ap->ops->error_handler) {
5298 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5299
5300 if (unlikely(qc->err_mask))
5301 qc->flags |= ATA_QCFLAG_FAILED;
5302
5303 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5304 if (!ata_tag_internal(qc->tag)) {
5305 /* always fill result TF for failed qc */
5306 fill_result_tf(qc);
5307 ata_qc_schedule_eh(qc);
5308 return;
5309 }
5310 }
5311
5312 /* read result TF if requested */
5313 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5314 fill_result_tf(qc);
5315
5316 __ata_qc_complete(qc);
5317 } else {
5318 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5319 return;
5320
5321 /* read result TF if failed or requested */
5322 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5323 fill_result_tf(qc);
5324
5325 __ata_qc_complete(qc);
5326 }
5327}
5328
5329/**
5330 * ata_qc_complete_multiple - Complete multiple qcs successfully
5331 * @ap: port in question
5332 * @qc_active: new qc_active mask
5333 * @finish_qc: LLDD callback invoked before completing a qc
5334 *
5335 * Complete in-flight commands. This function is meant to be
5336 * called from a low-level driver's interrupt routine to complete
5337 * requests normally. ap->qc_active and @qc_active are compared
5338 * and commands are completed accordingly.
5339 *
5340 * LOCKING:
5341 * spin_lock_irqsave(host lock)
5342 *
5343 * RETURNS:
5344 * Number of completed commands on success, -errno otherwise.
5345 */
5346int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5347 void (*finish_qc)(struct ata_queued_cmd *))
5348{
5349 int nr_done = 0;
5350 u32 done_mask;
5351 int i;
5352
5353 done_mask = ap->qc_active ^ qc_active;
5354
5355 if (unlikely(done_mask & qc_active)) {
5356 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5357 "(%08x->%08x)\n", ap->qc_active, qc_active);
5358 return -EINVAL;
5359 }
5360
5361 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5362 struct ata_queued_cmd *qc;
5363
5364 if (!(done_mask & (1 << i)))
5365 continue;
5366
5367 if ((qc = ata_qc_from_tag(ap, i))) {
5368 if (finish_qc)
5369 finish_qc(qc);
5370 ata_qc_complete(qc);
5371 nr_done++;
5372 }
5373 }
5374
5375 return nr_done;
5376}
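/*
 * Worked example (illustrative only): if ap->qc_active is 0x5 (tags 0
 * and 2 in flight) and the controller reports qc_active = 0x1, then
 * done_mask = 0x5 ^ 0x1 = 0x4 and only tag 2 is completed. A bit that
 * appears in @qc_active without having been active (done_mask &
 * qc_active != 0) is an illegal transition and fails with -EINVAL.
 */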
5377
5378static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5379{
5380 struct ata_port *ap = qc->ap;
5381
5382 switch (qc->tf.protocol) {
5383 case ATA_PROT_NCQ:
5384 case ATA_PROT_DMA:
5385 case ATA_PROT_ATAPI_DMA:
5386 return 1;
5387
5388 case ATA_PROT_ATAPI:
5389 case ATA_PROT_PIO:
5390 if (ap->flags & ATA_FLAG_PIO_DMA)
5391 return 1;
5392
5393 /* fall through */
5394
5395 default:
5396 return 0;
5397 }
5398
5399 /* never reached */
5400}
5401
5402/**
5403 * ata_qc_issue - issue taskfile to device
5404 * @qc: command to issue to device
5405 *
5406 * Prepare an ATA command for submission to the device.
5407 * This includes mapping the data into a DMA-able
5408 * area, filling in the S/G table, and finally
5409 * writing the taskfile to hardware, starting the command.
5410 *
5411 * LOCKING:
5412 * spin_lock_irqsave(host lock)
5413 */
5414void ata_qc_issue(struct ata_queued_cmd *qc)
5415{
5416 struct ata_port *ap = qc->ap;
5417 struct ata_link *link = qc->dev->link;
5418
5419 /* Make sure only one non-NCQ command is outstanding. The
5420 * check is skipped for old EH because it reuses active qc to
5421 * request ATAPI sense.
5422 */
5423 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5424
5425 if (qc->tf.protocol == ATA_PROT_NCQ) {
5426 WARN_ON(link->sactive & (1 << qc->tag));
5427 link->sactive |= 1 << qc->tag;
5428 } else {
5429 WARN_ON(link->sactive);
5430 link->active_tag = qc->tag;
5431 }
5432
5433 qc->flags |= ATA_QCFLAG_ACTIVE;
5434 ap->qc_active |= 1 << qc->tag;
5435
5436 if (ata_should_dma_map(qc)) {
5437 if (qc->flags & ATA_QCFLAG_SG) {
5438 if (ata_sg_setup(qc))
5439 goto sg_err;
5440 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5441 if (ata_sg_setup_one(qc))
5442 goto sg_err;
5443 }
5444 } else {
5445 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5446 }
5447
5448 ap->ops->qc_prep(qc);
5449
5450 qc->err_mask |= ap->ops->qc_issue(qc);
5451 if (unlikely(qc->err_mask))
5452 goto err;
5453 return;
5454
5455sg_err:
5456 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5457 qc->err_mask |= AC_ERR_SYSTEM;
5458err:
5459 ata_qc_complete(qc);
5460}
5461
5462/**
5463 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5464 * @qc: command to issue to device
5465 *
5466 * Using various libata functions and hooks, this function
5467 * starts an ATA command. ATA commands are grouped into
5468 * classes called "protocols", and issuing each type of protocol
5469 * is slightly different.
5470 *
5471 * May be used as the qc_issue() entry in ata_port_operations.
5472 *
5473 * LOCKING:
5474 * spin_lock_irqsave(host lock)
5475 *
5476 * RETURNS:
5477 * Zero on success, AC_ERR_* mask on failure
5478 */
5479
5480unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5481{
5482 struct ata_port *ap = qc->ap;
5483
5484 /* Use polling PIO if the LLD doesn't handle
5485 * interrupt-driven PIO and ATAPI CDB interrupts.
5486 */
5487 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5488 switch (qc->tf.protocol) {
5489 case ATA_PROT_PIO:
5490 case ATA_PROT_NODATA:
5491 case ATA_PROT_ATAPI:
5492 case ATA_PROT_ATAPI_NODATA:
5493 qc->tf.flags |= ATA_TFLAG_POLLING;
5494 break;
5495 case ATA_PROT_ATAPI_DMA:
5496 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5497 /* see ata_dma_blacklisted() */
5498 BUG();
5499 break;
5500 default:
5501 break;
5502 }
5503 }
5504
5505 /* select the device */
5506 ata_dev_select(ap, qc->dev->devno, 1, 0);
5507
5508 /* start the command */
5509 switch (qc->tf.protocol) {
5510 case ATA_PROT_NODATA:
5511 if (qc->tf.flags & ATA_TFLAG_POLLING)
5512 ata_qc_set_polling(qc);
5513
5514 ata_tf_to_host(ap, &qc->tf);
5515 ap->hsm_task_state = HSM_ST_LAST;
5516
5517 if (qc->tf.flags & ATA_TFLAG_POLLING)
5518 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5519
5520 break;
5521
5522 case ATA_PROT_DMA:
5523 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5524
5525 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5526 ap->ops->bmdma_setup(qc); /* set up bmdma */
5527 ap->ops->bmdma_start(qc); /* initiate bmdma */
5528 ap->hsm_task_state = HSM_ST_LAST;
5529 break;
5530
5531 case ATA_PROT_PIO:
5532 if (qc->tf.flags & ATA_TFLAG_POLLING)
5533 ata_qc_set_polling(qc);
5534
5535 ata_tf_to_host(ap, &qc->tf);
5536
5537 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5538 /* PIO data out protocol */
5539 ap->hsm_task_state = HSM_ST_FIRST;
5540 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5541
5542 /* always send first data block using
5543 * the ata_pio_task() codepath.
5544 */
5545 } else {
5546 /* PIO data in protocol */
5547 ap->hsm_task_state = HSM_ST;
5548
5549 if (qc->tf.flags & ATA_TFLAG_POLLING)
5550 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5551
5552 /* if polling, ata_pio_task() handles the rest.
5553 * otherwise, interrupt handler takes over from here.
5554 */
5555 }
5556
5557 break;
5558
5559 case ATA_PROT_ATAPI:
5560 case ATA_PROT_ATAPI_NODATA:
5561 if (qc->tf.flags & ATA_TFLAG_POLLING)
5562 ata_qc_set_polling(qc);
5563
5564 ata_tf_to_host(ap, &qc->tf);
5565
5566 ap->hsm_task_state = HSM_ST_FIRST;
5567
5568 /* send cdb by polling if no cdb interrupt */
5569 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5570 (qc->tf.flags & ATA_TFLAG_POLLING))
5571 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5572 break;
5573
5574 case ATA_PROT_ATAPI_DMA:
5575 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5576
5577 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5578 ap->ops->bmdma_setup(qc); /* set up bmdma */
5579 ap->hsm_task_state = HSM_ST_FIRST;
5580
5581 /* send cdb by polling if no cdb interrupt */
5582 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5583 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5584 break;
5585
5586 default:
5587 WARN_ON(1);
5588 return AC_ERR_SYSTEM;
5589 }
5590
5591 return 0;
5592}
5593
5594/**
5595 * ata_host_intr - Handle host interrupt for given (port, task)
5596 * @ap: Port on which interrupt arrived (possibly...)
5597 * @qc: Taskfile currently active in engine
5598 *
5599 * Handle host interrupt for given queued command. Currently,
5600 * only DMA interrupts are handled. All other commands are
5601 * handled via polling with interrupts disabled (nIEN bit).
5602 *
5603 * LOCKING:
5604 * spin_lock_irqsave(host lock)
5605 *
5606 * RETURNS:
5607 * One if interrupt was handled, zero if not (shared irq).
5608 */
5609
5610inline unsigned int ata_host_intr (struct ata_port *ap,
5611 struct ata_queued_cmd *qc)
5612{
5613 struct ata_eh_info *ehi = &ap->link.eh_info;
5614 u8 status, host_stat = 0;
5615
5616 VPRINTK("ata%u: protocol %d task_state %d\n",
5617 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5618
5619 /* Check whether we are expecting interrupt in this state */
5620 switch (ap->hsm_task_state) {
5621 case HSM_ST_FIRST:
5622 /* Some pre-ATAPI-4 devices assert INTRQ
5623 * in this state when ready to receive a CDB.
5624 */
5625
5626 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5627 * The flag was turned on only for ATAPI devices.
5628 * No need to check is_atapi_taskfile(&qc->tf) again.
5629 */
5630 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5631 goto idle_irq;
5632 break;
5633 case HSM_ST_LAST:
5634 if (qc->tf.protocol == ATA_PROT_DMA ||
5635 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5636 /* check status of DMA engine */
5637 host_stat = ap->ops->bmdma_status(ap);
5638 VPRINTK("ata%u: host_stat 0x%X\n",
5639 ap->print_id, host_stat);
5640
5641 /* if it's not our irq... */
5642 if (!(host_stat & ATA_DMA_INTR))
5643 goto idle_irq;
5644
5645 /* before we do anything else, clear DMA-Start bit */
5646 ap->ops->bmdma_stop(qc);
5647
5648 if (unlikely(host_stat & ATA_DMA_ERR)) {
5649 /* error when transferring data to/from memory */
5650 qc->err_mask |= AC_ERR_HOST_BUS;
5651 ap->hsm_task_state = HSM_ST_ERR;
5652 }
5653 }
5654 break;
5655 case HSM_ST:
5656 break;
5657 default:
5658 goto idle_irq;
5659 }
5660
5661 /* check altstatus */
5662 status = ata_altstatus(ap);
5663 if (status & ATA_BUSY)
5664 goto idle_irq;
5665
5666 /* check main status, clearing INTRQ */
5667 status = ata_chk_status(ap);
5668 if (unlikely(status & ATA_BUSY))
5669 goto idle_irq;
5670
5671 /* ack bmdma irq events */
5672 ap->ops->irq_clear(ap);
5673
5674 ata_hsm_move(ap, qc, status, 0);
5675
5676 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5677 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5678 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5679
5680 return 1; /* irq handled */
5681
5682idle_irq:
5683 ap->stats.idle_irq++;
5684
5685#ifdef ATA_IRQ_TRAP
5686 if ((ap->stats.idle_irq % 1000) == 0) {
5687 ap->ops->irq_ack(ap, 0); /* debug trap */
5688 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5689 return 1;
5690 }
5691#endif
5692 return 0; /* irq not handled */
5693}
5694
5695/**
5696 * ata_interrupt - Default ATA host interrupt handler
5697 * @irq: irq line (unused)
5698 * @dev_instance: pointer to our ata_host information structure
5699 *
5700 * Default interrupt handler for PCI IDE devices. Calls
5701 * ata_host_intr() for each port that is not disabled.
5702 *
5703 * LOCKING:
5704 * Obtains host lock during operation.
5705 *
5706 * RETURNS:
5707 * IRQ_NONE or IRQ_HANDLED.
5708 */
5709
5710irqreturn_t ata_interrupt (int irq, void *dev_instance)
5711{
5712 struct ata_host *host = dev_instance;
5713 unsigned int i;
5714 unsigned int handled = 0;
5715 unsigned long flags;
5716
5717 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5718 spin_lock_irqsave(&host->lock, flags);
5719
5720 for (i = 0; i < host->n_ports; i++) {
5721 struct ata_port *ap;
5722
5723 ap = host->ports[i];
5724 if (ap &&
5725 !(ap->flags & ATA_FLAG_DISABLED)) {
5726 struct ata_queued_cmd *qc;
5727
5728 qc = ata_qc_from_tag(ap, ap->link.active_tag);
5729 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5730 (qc->flags & ATA_QCFLAG_ACTIVE))
5731 handled |= ata_host_intr(ap, qc);
5732 }
5733 }
5734
5735 spin_unlock_irqrestore(&host->lock, flags);
5736
5737 return IRQ_RETVAL(handled);
5738}
5739
5740/**
5741 * sata_scr_valid - test whether SCRs are accessible
5742 * @link: ATA link to test SCR accessibility for
5743 *
5744 * Test whether SCRs are accessible for @link.
5745 *
5746 * LOCKING:
5747 * None.
5748 *
5749 * RETURNS:
5750 * 1 if SCRs are accessible, 0 otherwise.
5751 */
5752int sata_scr_valid(struct ata_link *link)
5753{
5754 struct ata_port *ap = link->ap;
5755
5756 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5757}
5758
5759/**
5760 * sata_scr_read - read SCR register of the specified port
5761 * @link: ATA link to read SCR for
5762 * @reg: SCR to read
5763 * @val: Place to store read value
5764 *
5765 * Read SCR register @reg of @link into *@val. This function is
5766 * guaranteed to succeed if the cable type of the port is SATA
5767 * and the port implements ->scr_read.
5768 *
5769 * LOCKING:
5770 * None.
5771 *
5772 * RETURNS:
5773 * 0 on success, negative errno on failure.
5774 */
5775int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5776{
5777 struct ata_port *ap = link->ap;
5778
5779 if (sata_scr_valid(link))
5780 return ap->ops->scr_read(ap, reg, val);
5781 return -EOPNOTSUPP;
5782}
5783
5784/**
5785 * sata_scr_write - write SCR register of the specified port
5786 * @link: ATA link to write SCR for
5787 * @reg: SCR to write
5788 * @val: value to write
5789 *
5790 * Write @val to SCR register @reg of @link. This function is
5791 * guaranteed to succeed if the cable type of the port is SATA
5792 * and the port implements ->scr_write.
5793 *
5794 * LOCKING:
5795 * None.
5796 *
5797 * RETURNS:
5798 * 0 on success, negative errno on failure.
5799 */
5800int sata_scr_write(struct ata_link *link, int reg, u32 val)
5801{
5802 struct ata_port *ap = link->ap;
5803
5804 if (sata_scr_valid(link))
5805 return ap->ops->scr_write(ap, reg, val);
5806 return -EOPNOTSUPP;
5807}
5808
5809/**
5810 * sata_scr_write_flush - write SCR register of the specified port and flush
5811 * @link: ATA link to write SCR for
5812 * @reg: SCR to write
5813 * @val: value to write
5814 *
5815 * This function is identical to sata_scr_write() except that this
5816 * function performs a flush after writing to the register.
5817 *
5818 * LOCKING:
5819 * None.
5820 *
5821 * RETURNS:
5822 * 0 on success, negative errno on failure.
5823 */
5824int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5825{
5826 struct ata_port *ap = link->ap;
5827 int rc;
5828
5829 if (sata_scr_valid(link)) {
5830 rc = ap->ops->scr_write(ap, reg, val);
5831 if (rc == 0)
5832 rc = ap->ops->scr_read(ap, reg, &val);
5833 return rc;
5834 }
5835 return -EOPNOTSUPP;
5836}
5837
5838/**
5839 * ata_link_online - test whether the given link is online
5840 * @link: ATA link to test
5841 *
5842 * Test whether @link is online. Note that this function returns
5843 * 0 if the online status of @link cannot be obtained, so
5844 * ata_link_online(link) != !ata_link_offline(link).
5845 *
5846 * LOCKING:
5847 * None.
5848 *
5849 * RETURNS:
5850 * 1 if the port online status is available and online.
5851 */
5852int ata_link_online(struct ata_link *link)
5853{
5854 u32 sstatus;
5855
5856 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5857 (sstatus & 0xf) == 0x3)
5858 return 1;
5859 return 0;
5860}
5861
5862/**
5863 * ata_link_offline - test whether the given link is offline
5864 * @link: ATA link to test
5865 *
5866 * Test whether @link is offline. Note that this function
5867 * returns 0 if the offline status of @link cannot be obtained, so
5868 * ata_link_online(link) != !ata_link_offline(link).
5869 *
5870 * LOCKING:
5871 * None.
5872 *
5873 * RETURNS:
5874 * 1 if the port offline status is available and offline.
5875 */
5876int ata_link_offline(struct ata_link *link)
5877{
5878 u32 sstatus;
5879
5880 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5881 (sstatus & 0xf) != 0x3)
5882 return 1;
5883 return 0;
5884}
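/*
 * Illustrative note (not from the original source): the low nibble of
 * SStatus is the DET field; the value 0x3 means "device presence
 * detected and PHY communication established", which is why both
 * helpers above compare (sstatus & 0xf) against 0x3.
 */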
5885
5886int ata_flush_cache(struct ata_device *dev)
5887{
5888 unsigned int err_mask;
5889 u8 cmd;
5890
5891 if (!ata_try_flush_cache(dev))
5892 return 0;
5893
5894 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5895 cmd = ATA_CMD_FLUSH_EXT;
5896 else
5897 cmd = ATA_CMD_FLUSH;
5898
5899 err_mask = ata_do_simple_cmd(dev, cmd);
5900 if (err_mask) {
5901 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5902 return -EIO;
5903 }
5904
5905 return 0;
5906}
5907
5908#ifdef CONFIG_PM
5909static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5910 unsigned int action, unsigned int ehi_flags,
5911 int wait)
5912{
5913 unsigned long flags;
5914 int i, rc;
5915
5916 for (i = 0; i < host->n_ports; i++) {
5917 struct ata_port *ap = host->ports[i];
5918 struct ata_link *link;
5919
5920 /* Previous resume operation might still be in
5921 * progress. Wait for PM_PENDING to clear.
5922 */
5923 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5924 ata_port_wait_eh(ap);
5925 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5926 }
5927
5928 /* request PM ops to EH */
5929 spin_lock_irqsave(ap->lock, flags);
5930
5931 ap->pm_mesg = mesg;
5932 if (wait) {
5933 rc = 0;
5934 ap->pm_result = &rc;
5935 }
5936
5937 ap->pflags |= ATA_PFLAG_PM_PENDING;
5938 __ata_port_for_each_link(link, ap) {
5939 link->eh_info.action |= action;
5940 link->eh_info.flags |= ehi_flags;
5941 }
5942
5943 ata_port_schedule_eh(ap);
5944
5945 spin_unlock_irqrestore(ap->lock, flags);
5946
5947 /* wait and check result */
5948 if (wait) {
5949 ata_port_wait_eh(ap);
5950 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5951 if (rc)
5952 return rc;
5953 }
5954 }
5955
5956 return 0;
5957}
5958
5959/**
5960 * ata_host_suspend - suspend host
5961 * @host: host to suspend
5962 * @mesg: PM message
5963 *
5964 * Suspend @host. Actual operation is performed by EH. This
5965 * function requests EH to perform PM operations and waits for EH
5966 * to finish.
5967 *
5968 * LOCKING:
5969 * Kernel thread context (may sleep).
5970 *
5971 * RETURNS:
5972 * 0 on success, -errno on failure.
5973 */
5974int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5975{
5976 int rc;
5977
5978 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5979 if (rc == 0)
5980 host->dev->power.power_state = mesg;
5981 return rc;
5982}
5983
5984/**
5985 * ata_host_resume - resume host
5986 * @host: host to resume
5987 *
5988 * Resume @host. Actual operation is performed by EH. This
5989 * function requests EH to perform PM operations and returns.
5990 * Note that all resume operations are performed in parallel.
5991 *
5992 * LOCKING:
5993 * Kernel thread context (may sleep).
5994 */
5995void ata_host_resume(struct ata_host *host)
5996{
5997 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5998 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5999 host->dev->power.power_state = PMSG_ON;
6000}
6001#endif
6002
6003/**
6004 * ata_port_start - Set port up for dma.
6005 * @ap: Port to initialize
6006 *
6007 * Called just after data structures for each port are
6008 * initialized. Allocates space for PRD table.
6009 *
6010 * May be used as the port_start() entry in ata_port_operations.
6011 *
6012 * LOCKING:
6013 * Inherited from caller.
6014 */
6015int ata_port_start(struct ata_port *ap)
6016{
6017 struct device *dev = ap->dev;
6018 int rc;
6019
6020 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6021 GFP_KERNEL);
6022 if (!ap->prd)
6023 return -ENOMEM;
6024
6025 rc = ata_pad_alloc(ap, dev);
6026 if (rc)
6027 return rc;
6028
6029 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6030 (unsigned long long)ap->prd_dma);
6031 return 0;
6032}
6033
6034/**
6035 * ata_dev_init - Initialize an ata_device structure
6036 * @dev: Device structure to initialize
6037 *
6038 * Initialize @dev in preparation for probing.
6039 *
6040 * LOCKING:
6041 * Inherited from caller.
6042 */
6043void ata_dev_init(struct ata_device *dev)
6044{
6045 struct ata_link *link = dev->link;
6046 struct ata_port *ap = link->ap;
6047 unsigned long flags;
6048
6049 /* SATA spd limit is bound to the first device */
6050 link->sata_spd_limit = link->hw_sata_spd_limit;
6051 link->sata_spd = 0;
6052
6053 /* High bits of dev->flags are used to record warm plug
6054 * requests which occur asynchronously. Synchronize using
6055 * host lock.
6056 */
6057 spin_lock_irqsave(ap->lock, flags);
6058 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6059 dev->horkage = 0;
6060 spin_unlock_irqrestore(ap->lock, flags);
6061
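	/* editorial note: everything from ATA_DEVICE_CLEAR_OFFSET onwards
	 * is re-initialized on each probe; fields before it persist.
	 */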
6062 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6063 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6064 dev->pio_mask = UINT_MAX;
6065 dev->mwdma_mask = UINT_MAX;
6066 dev->udma_mask = UINT_MAX;
6067}
6068
6069/**
6070 * ata_link_init - Initialize an ata_link structure
6071 * @ap: ATA port link is attached to
6072 * @link: Link structure to initialize
6073 * @pmp: Port multiplier port number
6074 *
6075 * Initialize @link.
6076 *
6077 * LOCKING:
6078 * Kernel thread context (may sleep)
6079 */
6080static void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6081{
6082 int i;
6083
6084 /* clear everything except for devices */
6085 memset(link, 0, offsetof(struct ata_link, device[0]));
6086
6087 link->ap = ap;
6088 link->pmp = pmp;
6089 link->active_tag = ATA_TAG_POISON;
6090 link->hw_sata_spd_limit = UINT_MAX;
6091
6092 /* can't use iterator, ap isn't initialized yet */
6093 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6094 struct ata_device *dev = &link->device[i];
6095
6096 dev->link = link;
6097 dev->devno = dev - link->device;
6098 ata_dev_init(dev);
6099 }
6100}
6101
6102/**
6103 * sata_link_init_spd - Initialize link->sata_spd_limit
6104 * @link: Link to configure sata_spd_limit for
6105 *
6106 * Initialize @link->[hw_]sata_spd_limit to the currently
6107 * configured value.
6108 *
6109 * LOCKING:
6110 * Kernel thread context (may sleep).
6111 *
6112 * RETURNS:
6113 * 0 on success, -errno on failure.
6114 */
6115static int sata_link_init_spd(struct ata_link *link)
6116{
6117 u32 scontrol, spd;
6118 int rc;
6119
6120 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6121 if (rc)
6122 return rc;
6123
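	/* Editorial note: SControl SPD (bits 7:4): 0 means no speed limit;
	 * otherwise cap hw_sata_spd_limit to generations at or below the
	 * configured one.
	 */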
6124 spd = (scontrol >> 4) & 0xf;
6125 if (spd)
6126 link->hw_sata_spd_limit &= (1 << spd) - 1;
6127
6128 link->sata_spd_limit = link->hw_sata_spd_limit;
6129
6130 return 0;
6131}
6132
6133/**
6134 * ata_port_alloc - allocate and initialize basic ATA port resources
6135 * @host: ATA host this allocated port belongs to
6136 *
6137 * Allocate and initialize basic ATA port resources.
6138 *
6139 * RETURNS:
6140 * Allocated ATA port on success, NULL on failure.
6141 *
6142 * LOCKING:
6143 * Inherited from calling layer (may sleep).
6144 */
6145struct ata_port *ata_port_alloc(struct ata_host *host)
6146{
6147 struct ata_port *ap;
6148
6149 DPRINTK("ENTER\n");
6150
6151 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6152 if (!ap)
6153 return NULL;
6154
6155 ap->pflags |= ATA_PFLAG_INITIALIZING;
6156 ap->lock = &host->lock;
6157 ap->flags = ATA_FLAG_DISABLED;
6158 ap->print_id = -1;
6159 ap->ctl = ATA_DEVCTL_OBS;
6160 ap->host = host;
6161 ap->dev = host->dev;
6162 ap->last_ctl = 0xFF;
6163
6164#if defined(ATA_VERBOSE_DEBUG)
6165 /* turn on all debugging levels */
6166 ap->msg_enable = 0x00FF;
6167#elif defined(ATA_DEBUG)
6168 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6169#else
6170 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6171#endif
6172
6173 INIT_DELAYED_WORK(&ap->port_task, NULL);
6174 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6175 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6176 INIT_LIST_HEAD(&ap->eh_done_q);
6177 init_waitqueue_head(&ap->eh_wait_q);
6178 init_timer_deferrable(&ap->fastdrain_timer);
6179 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6180 ap->fastdrain_timer.data = (unsigned long)ap;
6181
6182 ap->cbl = ATA_CBL_NONE;
6183
6184 ata_link_init(ap, &ap->link, 0);
6185
6186#ifdef ATA_IRQ_TRAP
6187 ap->stats.unhandled_irq = 1;
6188 ap->stats.idle_irq = 1;
6189#endif
6190 return ap;
6191}
6192
6193static void ata_host_release(struct device *gendev, void *res)
6194{
6195 struct ata_host *host = dev_get_drvdata(gendev);
6196 int i;
6197
6198 for (i = 0; i < host->n_ports; i++) {
6199 struct ata_port *ap = host->ports[i];
6200
6201 if (!ap)
6202 continue;
6203
6204 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6205 ap->ops->port_stop(ap);
6206 }
6207
6208 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6209 host->ops->host_stop(host);
6210
6211 for (i = 0; i < host->n_ports; i++) {
6212 struct ata_port *ap = host->ports[i];
6213
6214 if (!ap)
6215 continue;
6216
6217 if (ap->scsi_host)
6218 scsi_host_put(ap->scsi_host);
6219
6220 kfree(ap);
6221 host->ports[i] = NULL;
6222 }
6223
6224 dev_set_drvdata(gendev, NULL);
6225}
6226
6227/**
6228 * ata_host_alloc - allocate and init basic ATA host resources
6229 * @dev: generic device this host is associated with
6230 * @max_ports: maximum number of ATA ports associated with this host
6231 *
6232 * Allocate and initialize basic ATA host resources.  An LLD calls
6233 * this function to allocate a host, initializes it fully, and
6234 * attaches it using ata_host_register().
6235 *
6236 * @max_ports ports are allocated and host->n_ports is
6237 * initialized to @max_ports. The caller is allowed to decrease
6238 * host->n_ports before calling ata_host_register(). The unused
6239 * ports will be automatically freed on registration.
6240 *
6241 * RETURNS:
6242 * Allocated ATA host on success, NULL on failure.
6243 *
6244 * LOCKING:
6245 * Inherited from calling layer (may sleep).
6246 */
6247struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6248{
6249 struct ata_host *host;
6250 size_t sz;
6251 int i;
6252
6253 DPRINTK("ENTER\n");
6254
6255 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6256 return NULL;
6257
6258 /* alloc a container for our list of ATA ports (buses) */
6259 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6261 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6262 if (!host)
6263 goto err_out;
6264
6265 devres_add(dev, host);
6266 dev_set_drvdata(dev, host);
6267
6268 spin_lock_init(&host->lock);
6269 host->dev = dev;
6270 host->n_ports = max_ports;
6271
6272 /* allocate ports bound to this host */
6273 for (i = 0; i < max_ports; i++) {
6274 struct ata_port *ap;
6275
6276 ap = ata_port_alloc(host);
6277 if (!ap)
6278 goto err_out;
6279
6280 ap->port_no = i;
6281 host->ports[i] = ap;
6282 }
6283
6284 devres_remove_group(dev, NULL);
6285 return host;
6286
6287 err_out:
6288 devres_release_group(dev, NULL);
6289 return NULL;
6290}
6291
6292/**
6293 * ata_host_alloc_pinfo - alloc host and init with port_info array
6294 * @dev: generic device this host is associated with
6295 * @ppi: array of ATA port_info to initialize host with
6296 * @n_ports: number of ATA ports attached to this host
6297 *
6298 * Allocate an ATA host and initialize it with info from @ppi.  If
6299 * NULL-terminated, @ppi may contain fewer entries than @n_ports;
6300 * the last entry is then used for all remaining ports.
6301 *
6302 * RETURNS:
6303 * Allocated ATA host on success, NULL on failure.
6304 *
6305 * LOCKING:
6306 * Inherited from calling layer (may sleep).
6307 */
6308struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6309 const struct ata_port_info * const * ppi,
6310 int n_ports)
6311{
6312 const struct ata_port_info *pi;
6313 struct ata_host *host;
6314 int i, j;
6315
6316 host = ata_host_alloc(dev, n_ports);
6317 if (!host)
6318 return NULL;
6319
6320 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6321 struct ata_port *ap = host->ports[i];
6322
6323 if (ppi[j])
6324 pi = ppi[j++];
6325
6326 ap->pio_mask = pi->pio_mask;
6327 ap->mwdma_mask = pi->mwdma_mask;
6328 ap->udma_mask = pi->udma_mask;
6329 ap->flags |= pi->flags;
6330 ap->link.flags |= pi->link_flags;
6331 ap->ops = pi->port_ops;
6332
6333 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6334 host->ops = pi->port_ops;
6335 if (!host->private_data && pi->private_data)
6336 host->private_data = pi->private_data;
6337 }
6338
6339 return host;
6340}
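
/*
 * Usage sketch (editorial, not part of libata; foo_* names are
 * hypothetical): because a NULL-terminated @ppi shorter than @n_ports
 * replicates its last entry, a symmetric two-port controller needs
 * only one ata_port_info:
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= 0x1f,			(pio0-4)
 *		.mwdma_mask	= 0x07,			(mwdma0-2)
 *		.udma_mask	= 0x3f,			(udma0-5)
 *		.port_ops	= &foo_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */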
6341
6342/**
6343 * ata_host_start - start and freeze ports of an ATA host
6344 * @host: ATA host to start ports for
6345 *
6346 * Start and then freeze ports of @host. Started status is
6347 * recorded in host->flags, so this function can be called
6348 * multiple times. Ports are guaranteed to get started only
6349 * once.  If host->ops isn't initialized yet, it's set to the
6350 * first non-dummy port ops.
6351 *
6352 * LOCKING:
6353 * Inherited from calling layer (may sleep).
6354 *
6355 * RETURNS:
6356 * 0 if all ports are started successfully, -errno otherwise.
6357 */
6358int ata_host_start(struct ata_host *host)
6359{
6360 int i, rc;
6361
6362 if (host->flags & ATA_HOST_STARTED)
6363 return 0;
6364
6365 for (i = 0; i < host->n_ports; i++) {
6366 struct ata_port *ap = host->ports[i];
6367
6368 if (!host->ops && !ata_port_is_dummy(ap))
6369 host->ops = ap->ops;
6370
6371 if (ap->ops->port_start) {
6372 rc = ap->ops->port_start(ap);
6373 if (rc) {
6374 ata_port_printk(ap, KERN_ERR, "failed to "
6375 "start port (errno=%d)\n", rc);
6376 goto err_out;
6377 }
6378 }
6379
6380 ata_eh_freeze_port(ap);
6381 }
6382
6383 host->flags |= ATA_HOST_STARTED;
6384 return 0;
6385
6386 err_out:
6387 while (--i >= 0) {
6388 struct ata_port *ap = host->ports[i];
6389
6390 if (ap->ops->port_stop)
6391 ap->ops->port_stop(ap);
6392 }
6393 return rc;
6394}
6395
6396/**
6397 * ata_host_init - Initialize a host struct
6398 * @host: host to initialize
6399 * @dev: device host is attached to
6400 * @flags: host flags
6401 * @ops: port_ops
6402 *
6403 * LOCKING:
6404 * PCI/etc. bus probe sem.
6405 *
6406 */
6407/* KILLME - the only user left is ipr */
6408void ata_host_init(struct ata_host *host, struct device *dev,
6409 unsigned long flags, const struct ata_port_operations *ops)
6410{
6411 spin_lock_init(&host->lock);
6412 host->dev = dev;
6413 host->flags = flags;
6414 host->ops = ops;
6415}
6416
6417/**
6418 * ata_host_register - register initialized ATA host
6419 * @host: ATA host to register
6420 * @sht: template for SCSI host
6421 *
6422 * Register initialized ATA host. @host is allocated using
6423 * ata_host_alloc() and fully initialized by the LLD.  This function
6424 * starts the ports, registers @host with the ATA and SCSI layers,
6425 * and probes registered devices.
6426 *
6427 * LOCKING:
6428 * Inherited from calling layer (may sleep).
6429 *
6430 * RETURNS:
6431 * 0 on success, -errno otherwise.
6432 */
6433int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6434{
6435 int i, rc;
6436
6437 /* host must have been started */
6438 if (!(host->flags & ATA_HOST_STARTED)) {
6439 dev_printk(KERN_ERR, host->dev,
6440 "BUG: trying to register unstarted host\n");
6441 WARN_ON(1);
6442 return -EINVAL;
6443 }
6444
6445 /* Blow away unused ports. This happens when LLD can't
6446 * determine the exact number of ports to allocate at
6447 * allocation time.
6448 */
6449 for (i = host->n_ports; host->ports[i]; i++)
6450 kfree(host->ports[i]);
6451
6452 /* give ports names and add SCSI hosts */
6453 for (i = 0; i < host->n_ports; i++)
6454 host->ports[i]->print_id = ata_print_id++;
6455
6456 rc = ata_scsi_add_hosts(host, sht);
6457 if (rc)
6458 return rc;
6459
6460 /* associate with ACPI nodes */
6461 ata_acpi_associate(host);
6462
6463 /* set cable, sata_spd_limit and report */
6464 for (i = 0; i < host->n_ports; i++) {
6465 struct ata_port *ap = host->ports[i];
6466 int irq_line;
6467 unsigned long xfer_mask;
6468
6469 /* set SATA cable type if still unset */
6470 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6471 ap->cbl = ATA_CBL_SATA;
6472
6473 /* init sata_spd_limit to the current value */
6474 sata_link_init_spd(&ap->link);
6475
6476 /* report the secondary IRQ for second channel legacy */
6477 irq_line = host->irq;
6478 if (i == 1 && host->irq2)
6479 irq_line = host->irq2;
6480
6481 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6482 ap->udma_mask);
6483
6484 /* print per-port info to dmesg */
6485 if (!ata_port_is_dummy(ap))
6486 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6487 "ctl 0x%p bmdma 0x%p irq %d\n",
6488 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6489 ata_mode_string(xfer_mask),
6490 ap->ioaddr.cmd_addr,
6491 ap->ioaddr.ctl_addr,
6492 ap->ioaddr.bmdma_addr,
6493 irq_line);
6494 else
6495 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6496 }
6497
6498 /* perform each probe synchronously */
6499 DPRINTK("probe begin\n");
6500 for (i = 0; i < host->n_ports; i++) {
6501 struct ata_port *ap = host->ports[i];
6502 int rc;
6503
6504 /* probe */
6505 if (ap->ops->error_handler) {
6506 struct ata_eh_info *ehi = &ap->link.eh_info;
6507 unsigned long flags;
6508
6509 ata_port_probe(ap);
6510
6511 /* kick EH for boot probing */
6512 spin_lock_irqsave(ap->lock, flags);
6513
6514 ehi->probe_mask =
6515 (1 << ata_link_max_devices(&ap->link)) - 1;
6516 ehi->action |= ATA_EH_SOFTRESET;
6517 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6518
6519 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6520 ap->pflags |= ATA_PFLAG_LOADING;
6521 ata_port_schedule_eh(ap);
6522
6523 spin_unlock_irqrestore(ap->lock, flags);
6524
6525 /* wait for EH to finish */
6526 ata_port_wait_eh(ap);
6527 } else {
6528 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6529 rc = ata_bus_probe(ap);
6530 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6531
6532 if (rc) {
6533 /* FIXME: do something useful here?
6534 * Current libata behavior will
6535 * tear down everything when
6536 * the module is removed
6537 * or the h/w is unplugged.
6538 */
6539 }
6540 }
6541 }
6542
6543 /* probes are done, now scan each port's disk(s) */
6544 DPRINTK("host probe begin\n");
6545 for (i = 0; i < host->n_ports; i++) {
6546 struct ata_port *ap = host->ports[i];
6547
6548 ata_scsi_scan_host(ap, 1);
6549 }
6550
6551 return 0;
6552}
6553
6554/**
6555 * ata_host_activate - start host, request IRQ and register it
6556 * @host: target ATA host
6557 * @irq: IRQ to request
6558 * @irq_handler: irq_handler used when requesting IRQ
6559 * @irq_flags: irq_flags used when requesting IRQ
6560 * @sht: scsi_host_template to use when registering the host
6561 *
6562 * After allocating an ATA host and initializing it, most libata
6563 * LLDs perform three steps to activate the host - start host,
6564 * request IRQ and register it.  This helper takes the necessary
6565 * arguments and performs the three steps in one go.
6566 *
6567 * LOCKING:
6568 * Inherited from calling layer (may sleep).
6569 *
6570 * RETURNS:
6571 * 0 on success, -errno otherwise.
6572 */
6573int ata_host_activate(struct ata_host *host, int irq,
6574 irq_handler_t irq_handler, unsigned long irq_flags,
6575 struct scsi_host_template *sht)
6576{
6577 int rc;
6578
6579 rc = ata_host_start(host);
6580 if (rc)
6581 return rc;
6582
6583 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6584 dev_driver_string(host->dev), host);
6585 if (rc)
6586 return rc;
6587
6588 /* Used to print device info at probe */
6589 host->irq = irq;
6590
6591 rc = ata_host_register(host, sht);
6592 /* if failed, just free the IRQ and leave ports alone */
6593 if (rc)
6594 devm_free_irq(host->dev, irq, host);
6595
6596 return rc;
6597}
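
/*
 * Usage sketch (editorial, not part of libata; foo_* names are
 * hypothetical): a minimal PCI LLD probe path built from the helpers
 * above:
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		(iomap BARs and fill each host->ports[i]->ioaddr here)
 *
 *		return ata_host_activate(host, pdev->irq, foo_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 */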
6598
6599/**
6600 * ata_port_detach - Detach ATA port in preparation for device removal
6601 * @ap: ATA port to be detached
6602 *
6603 * Detach all ATA devices and the associated SCSI devices of @ap;
6604 * then, remove the associated SCSI host. @ap is guaranteed to
6605 * be quiescent on return from this function.
6606 *
6607 * LOCKING:
6608 * Kernel thread context (may sleep).
6609 */
6610void ata_port_detach(struct ata_port *ap)
6611{
6612 unsigned long flags;
6613 struct ata_link *link;
6614 struct ata_device *dev;
6615
6616 if (!ap->ops->error_handler)
6617 goto skip_eh;
6618
6619 /* tell EH we're leaving & flush EH */
6620 spin_lock_irqsave(ap->lock, flags);
6621 ap->pflags |= ATA_PFLAG_UNLOADING;
6622 spin_unlock_irqrestore(ap->lock, flags);
6623
6624 ata_port_wait_eh(ap);
6625
6626 /* EH is now guaranteed to see UNLOADING, so no new device
6627 * will be attached. Disable all existing devices.
6628 */
6629 spin_lock_irqsave(ap->lock, flags);
6630
6631 ata_port_for_each_link(link, ap) {
6632 ata_link_for_each_dev(dev, link)
6633 ata_dev_disable(dev);
6634 }
6635
6636 spin_unlock_irqrestore(ap->lock, flags);
6637
6638 /* Final freeze & EH. All in-flight commands are aborted. EH
6639 * will be skipped and retries will be terminated with bad
6640 * target.
6641 */
6642 spin_lock_irqsave(ap->lock, flags);
6643 ata_port_freeze(ap); /* won't be thawed */
6644 spin_unlock_irqrestore(ap->lock, flags);
6645
6646 ata_port_wait_eh(ap);
6647 cancel_rearming_delayed_work(&ap->hotplug_task);
6648
6649 skip_eh:
6650 /* remove the associated SCSI host */
6651 scsi_remove_host(ap->scsi_host);
6652}
6653
6654/**
6655 * ata_host_detach - Detach all ports of an ATA host
6656 * @host: Host to detach
6657 *
6658 * Detach all ports of @host.
6659 *
6660 * LOCKING:
6661 * Kernel thread context (may sleep).
6662 */
6663void ata_host_detach(struct ata_host *host)
6664{
6665 int i;
6666
6667 for (i = 0; i < host->n_ports; i++)
6668 ata_port_detach(host->ports[i]);
6669}
6670
6671/**
6672 * ata_std_ports - initialize ioaddr with standard port offsets.
6673 * @ioaddr: IO address structure to be initialized
6674 *
6675 * Utility function which initializes data_addr, error_addr,
6676 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6677 * device_addr, status_addr, and command_addr to standard offsets
6678 * relative to cmd_addr.
6679 *
6680 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6681 */
6682
6683void ata_std_ports(struct ata_ioports *ioaddr)
6684{
6685 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6686 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6687 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6688 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6689 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6690 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6691 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6692 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6693 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6694 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6695}
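
/*
 * Usage sketch (editorial; cmd_base/ctl_base are hypothetical iomapped
 * addresses): callers fill in the bases ata_std_ports() does not touch,
 * then derive the rest:
 *
 *	ioaddr->cmd_addr = cmd_base;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr = ctl_base;
 *	ata_std_ports(ioaddr);
 */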
6696
6697
6698#ifdef CONFIG_PCI
6699
6700/**
6701 * ata_pci_remove_one - PCI layer callback for device removal
6702 * @pdev: PCI device that was removed
6703 *
6704 * PCI layer indicates to libata via this hook that hot-unplug or
6705 * module unload event has occurred. Detach all ports. Resource
6706 * release is handled via devres.
6707 *
6708 * LOCKING:
6709 * Inherited from PCI layer (may sleep).
6710 */
6711void ata_pci_remove_one(struct pci_dev *pdev)
6712{
6713 struct device *dev = pci_dev_to_dev(pdev);
6714 struct ata_host *host = dev_get_drvdata(dev);
6715
6716 ata_host_detach(host);
6717}
6718
6719/* move to PCI subsystem */
6720int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6721{
6722 unsigned long tmp = 0;
6723
6724 switch (bits->width) {
6725 case 1: {
6726 u8 tmp8 = 0;
6727 pci_read_config_byte(pdev, bits->reg, &tmp8);
6728 tmp = tmp8;
6729 break;
6730 }
6731 case 2: {
6732 u16 tmp16 = 0;
6733 pci_read_config_word(pdev, bits->reg, &tmp16);
6734 tmp = tmp16;
6735 break;
6736 }
6737 case 4: {
6738 u32 tmp32 = 0;
6739 pci_read_config_dword(pdev, bits->reg, &tmp32);
6740 tmp = tmp32;
6741 break;
6742 }
6743
6744 default:
6745 return -EINVAL;
6746 }
6747
6748 tmp &= bits->mask;
6749
6750 return (tmp == bits->val) ? 1 : 0;
6751}
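
/*
 * Usage sketch (editorial; the register offset and foo_* names are
 * hypothetical, modeled on ata_piix-style port-enable tests).  The
 * pci_bits fields are { reg, width in bytes, mask, val }; the function
 * returns 1 when (config[reg] & mask) == val:
 *
 *	static const struct pci_bits foo_enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENOENT;		(port disabled in config space)
 */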
6752
6753#ifdef CONFIG_PM
6754void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6755{
6756 pci_save_state(pdev);
6757 pci_disable_device(pdev);
6758
6759 if (mesg.event == PM_EVENT_SUSPEND)
6760 pci_set_power_state(pdev, PCI_D3hot);
6761}
6762
6763int ata_pci_device_do_resume(struct pci_dev *pdev)
6764{
6765 int rc;
6766
6767 pci_set_power_state(pdev, PCI_D0);
6768 pci_restore_state(pdev);
6769
6770 rc = pcim_enable_device(pdev);
6771 if (rc) {
6772 dev_printk(KERN_ERR, &pdev->dev,
6773 "failed to enable device after resume (%d)\n", rc);
6774 return rc;
6775 }
6776
6777 pci_set_master(pdev);
6778 return 0;
6779}
6780
6781int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6782{
6783 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6784 int rc = 0;
6785
6786 rc = ata_host_suspend(host, mesg);
6787 if (rc)
6788 return rc;
6789
6790 ata_pci_device_do_suspend(pdev, mesg);
6791
6792 return 0;
6793}
6794
6795int ata_pci_device_resume(struct pci_dev *pdev)
6796{
6797 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6798 int rc;
6799
6800 rc = ata_pci_device_do_resume(pdev);
6801 if (rc == 0)
6802 ata_host_resume(host);
6803 return rc;
6804}
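
/*
 * Usage sketch (editorial; foo_* names are hypothetical): LLDs with no
 * controller-specific PM work can wire these helpers straight into
 * their pci_driver:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */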
6805#endif /* CONFIG_PM */
6806
6807#endif /* CONFIG_PCI */
6808
6809
6810static int __init ata_init(void)
6811{
6812 ata_probe_timeout *= HZ;
6813 ata_wq = create_workqueue("ata");
6814 if (!ata_wq)
6815 return -ENOMEM;
6816
6817 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6818 if (!ata_aux_wq) {
6819 destroy_workqueue(ata_wq);
6820 return -ENOMEM;
6821 }
6822
6823 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6824 return 0;
6825}
6826
6827static void __exit ata_exit(void)
6828{
6829 destroy_workqueue(ata_wq);
6830 destroy_workqueue(ata_aux_wq);
6831}
6832
6833subsys_initcall(ata_init);
6834module_exit(ata_exit);
6835
6836static unsigned long ratelimit_time;
6837static DEFINE_SPINLOCK(ata_ratelimit_lock);
6838
6839int ata_ratelimit(void)
6840{
6841 int rc;
6842 unsigned long flags;
6843
6844 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6845
6846 if (time_after(jiffies, ratelimit_time)) {
6847 rc = 1;
6848 ratelimit_time = jiffies + (HZ/5);
6849 } else
6850 rc = 0;
6851
6852 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6853
6854 return rc;
6855}
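
/*
 * Usage sketch (editorial): ata_ratelimit() returns non-zero at most
 * once every HZ/5 jiffies, so interrupt-path warnings can be gated on
 * it:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */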
6856
6857/**
6858 * ata_wait_register - wait until register value changes
6859 * @reg: IO-mapped register
6860 * @mask: Mask to apply to read register value
6861 * @val: Wait condition
6862 * @interval_msec: polling interval in milliseconds
6863 * @timeout_msec: timeout in milliseconds
6864 *
6865 * Waiting for some bits of register to change is a common
6866 * operation for ATA controllers.  This function reads the 32-bit LE
6867 * IO-mapped register @reg and tests for the following condition:
6868 *
6869 * (*@reg & @mask) != @val
6870 *
6871 * If the condition is met, it returns; otherwise, the process is
6872 * repeated after @interval_msec until timeout.
6873 *
6874 * LOCKING:
6875 * Kernel thread context (may sleep)
6876 *
6877 * RETURNS:
6878 * The final register value.
6879 */
6880u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6881 unsigned long interval_msec,
6882 unsigned long timeout_msec)
6883{
6884 unsigned long timeout;
6885 u32 tmp;
6886
6887 tmp = ioread32(reg);
6888
6889 /* Calculate timeout _after_ the first read to make sure
6890 * preceding writes reach the controller before starting to
6891 * eat away the timeout.
6892 */
6893 timeout = jiffies + (timeout_msec * HZ) / 1000;
6894
6895 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6896 msleep(interval_msec);
6897 tmp = ioread32(reg);
6898 }
6899
6900 return tmp;
6901}
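
/*
 * Usage sketch (editorial; mmio and PORT_STATUS are hypothetical): wait
 * up to a second, polling every 10ms, for BSY to clear in an mmio
 * status register:
 *
 *	status = ata_wait_register(mmio + PORT_STATUS, ATA_BUSY, ATA_BUSY,
 *				   10, 1000);
 *	if (status & ATA_BUSY)
 *		(timed out, device still busy)
 */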
6902
6903/*
6904 * Dummy port_ops
6905 */
6906static void ata_dummy_noret(struct ata_port *ap) { }
6907static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6908static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6909
6910static u8 ata_dummy_check_status(struct ata_port *ap)
6911{
6912 return ATA_DRDY;
6913}
6914
6915static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6916{
6917 return AC_ERR_SYSTEM;
6918}
6919
6920const struct ata_port_operations ata_dummy_port_ops = {
6921 .port_disable = ata_port_disable,
6922 .check_status = ata_dummy_check_status,
6923 .check_altstatus = ata_dummy_check_status,
6924 .dev_select = ata_noop_dev_select,
6925 .qc_prep = ata_noop_qc_prep,
6926 .qc_issue = ata_dummy_qc_issue,
6927 .freeze = ata_dummy_noret,
6928 .thaw = ata_dummy_noret,
6929 .error_handler = ata_dummy_noret,
6930 .post_internal_cmd = ata_dummy_qc_noret,
6931 .irq_clear = ata_dummy_noret,
6932 .port_start = ata_dummy_ret0,
6933 .port_stop = ata_dummy_noret,
6934};
6935
6936const struct ata_port_info ata_dummy_port_info = {
6937 .port_ops = &ata_dummy_port_ops,
6938};
6939
6940/*
6941 * libata is essentially a library of internal helper functions for
6942 * low-level ATA host controller drivers. As such, the API/ABI is
6943 * likely to change as new drivers are added and updated.
6944 * Do not depend on ABI/API stability.
6945 */
6946
6947EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6948EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6949EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6950EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6951EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6952EXPORT_SYMBOL_GPL(ata_std_bios_param);
6953EXPORT_SYMBOL_GPL(ata_std_ports);
6954EXPORT_SYMBOL_GPL(ata_host_init);
6955EXPORT_SYMBOL_GPL(ata_host_alloc);
6956EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6957EXPORT_SYMBOL_GPL(ata_host_start);
6958EXPORT_SYMBOL_GPL(ata_host_register);
6959EXPORT_SYMBOL_GPL(ata_host_activate);
6960EXPORT_SYMBOL_GPL(ata_host_detach);
6961EXPORT_SYMBOL_GPL(ata_sg_init);
6962EXPORT_SYMBOL_GPL(ata_sg_init_one);
6963EXPORT_SYMBOL_GPL(ata_hsm_move);
6964EXPORT_SYMBOL_GPL(ata_qc_complete);
6965EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6966EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6967EXPORT_SYMBOL_GPL(ata_tf_load);
6968EXPORT_SYMBOL_GPL(ata_tf_read);
6969EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6970EXPORT_SYMBOL_GPL(ata_std_dev_select);
6971EXPORT_SYMBOL_GPL(sata_print_link_status);
6972EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6973EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6974EXPORT_SYMBOL_GPL(ata_check_status);
6975EXPORT_SYMBOL_GPL(ata_altstatus);
6976EXPORT_SYMBOL_GPL(ata_exec_command);
6977EXPORT_SYMBOL_GPL(ata_port_start);
6978EXPORT_SYMBOL_GPL(ata_sff_port_start);
6979EXPORT_SYMBOL_GPL(ata_interrupt);
6980EXPORT_SYMBOL_GPL(ata_do_set_mode);
6981EXPORT_SYMBOL_GPL(ata_data_xfer);
6982EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6983EXPORT_SYMBOL_GPL(ata_qc_prep);
6984EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
6985EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6986EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6987EXPORT_SYMBOL_GPL(ata_bmdma_start);
6988EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6989EXPORT_SYMBOL_GPL(ata_bmdma_status);
6990EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6991EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6992EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6993EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6994EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6995EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6996EXPORT_SYMBOL_GPL(ata_port_probe);
6997EXPORT_SYMBOL_GPL(ata_dev_disable);
6998EXPORT_SYMBOL_GPL(sata_set_spd);
6999EXPORT_SYMBOL_GPL(sata_link_debounce);
7000EXPORT_SYMBOL_GPL(sata_link_resume);
7001EXPORT_SYMBOL_GPL(sata_phy_reset);
7002EXPORT_SYMBOL_GPL(__sata_phy_reset);
7003EXPORT_SYMBOL_GPL(ata_bus_reset);
7004EXPORT_SYMBOL_GPL(ata_std_prereset);
7005EXPORT_SYMBOL_GPL(ata_std_softreset);
7006EXPORT_SYMBOL_GPL(sata_link_hardreset);
7007EXPORT_SYMBOL_GPL(sata_std_hardreset);
7008EXPORT_SYMBOL_GPL(ata_std_postreset);
7009EXPORT_SYMBOL_GPL(ata_dev_classify);
7010EXPORT_SYMBOL_GPL(ata_dev_pair);
7011EXPORT_SYMBOL_GPL(ata_port_disable);
7012EXPORT_SYMBOL_GPL(ata_ratelimit);
7013EXPORT_SYMBOL_GPL(ata_wait_register);
7014EXPORT_SYMBOL_GPL(ata_busy_sleep);
7015EXPORT_SYMBOL_GPL(ata_wait_ready);
7016EXPORT_SYMBOL_GPL(ata_port_queue_task);
7017EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7018EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7019EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7020EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7021EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7022EXPORT_SYMBOL_GPL(ata_host_intr);
7023EXPORT_SYMBOL_GPL(sata_scr_valid);
7024EXPORT_SYMBOL_GPL(sata_scr_read);
7025EXPORT_SYMBOL_GPL(sata_scr_write);
7026EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7027EXPORT_SYMBOL_GPL(ata_link_online);
7028EXPORT_SYMBOL_GPL(ata_link_offline);
7029#ifdef CONFIG_PM
7030EXPORT_SYMBOL_GPL(ata_host_suspend);
7031EXPORT_SYMBOL_GPL(ata_host_resume);
7032#endif /* CONFIG_PM */
7033EXPORT_SYMBOL_GPL(ata_id_string);
7034EXPORT_SYMBOL_GPL(ata_id_c_string);
7035EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7036EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7037
7038EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7039EXPORT_SYMBOL_GPL(ata_timing_compute);
7040EXPORT_SYMBOL_GPL(ata_timing_merge);
7041
7042#ifdef CONFIG_PCI
7043EXPORT_SYMBOL_GPL(pci_test_config_bits);
7044EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
7045EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7046EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7047EXPORT_SYMBOL_GPL(ata_pci_init_one);
7048EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7049#ifdef CONFIG_PM
7050EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7051EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7052EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7053EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7054#endif /* CONFIG_PM */
7055EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7056EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7057#endif /* CONFIG_PCI */
7058
7059EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7060EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7061EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7062EXPORT_SYMBOL_GPL(ata_eng_timeout);
7063EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7064EXPORT_SYMBOL_GPL(ata_link_abort);
7065EXPORT_SYMBOL_GPL(ata_port_abort);
7066EXPORT_SYMBOL_GPL(ata_port_freeze);
7067EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7068EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7069EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7070EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7071EXPORT_SYMBOL_GPL(ata_do_eh);
7072EXPORT_SYMBOL_GPL(ata_irq_on);
7073EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
7074EXPORT_SYMBOL_GPL(ata_irq_ack);
7075EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
7076EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7077
7078EXPORT_SYMBOL_GPL(ata_cable_40wire);
7079EXPORT_SYMBOL_GPL(ata_cable_80wire);
7080EXPORT_SYMBOL_GPL(ata_cable_unknown);
7081EXPORT_SYMBOL_GPL(ata_cable_sata);