1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <scsi/scsi.h>
60 #include <scsi/scsi_cmnd.h>
61 #include <scsi/scsi_host.h>
62 #include <linux/libata.h>
63 #include <asm/byteorder.h>
64 #include <linux/cdrom.h>
65
66 #include "libata.h"
67
68
69 /* debounce timing parameters in msecs { interval, duration, timeout } */
70 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
71 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
72 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
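/*
 * Illustration (reading assumed from the link debounce helpers later in
 * this file): each triple is consumed as { interval, duration, timeout } -
 * SStatus is sampled every `interval` msecs, must hold a stable value for
 * `duration` msecs, and the attempt is abandoned after `timeout` msecs.
 * So "normal" polls every 5ms, wanting 100ms of stability within 2s.
 */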
73
74 const struct ata_port_operations ata_base_port_ops = {
75 .prereset = ata_std_prereset,
76 .postreset = ata_std_postreset,
77 .error_handler = ata_std_error_handler,
78 };
79
80 const struct ata_port_operations sata_port_ops = {
81 .inherits = &ata_base_port_ops,
82
83 .qc_defer = ata_std_qc_defer,
84 .hardreset = sata_std_hardreset,
85 };
86
87 static unsigned int ata_dev_init_params(struct ata_device *dev,
88 u16 heads, u16 sectors);
89 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
90 static unsigned int ata_dev_set_feature(struct ata_device *dev,
91 u8 enable, u8 feature);
92 static void ata_dev_xfermask(struct ata_device *dev);
93 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
94
95 unsigned int ata_print_id = 1;
96 static struct workqueue_struct *ata_wq;
97
98 struct workqueue_struct *ata_aux_wq;
99
100 struct ata_force_param {
101 const char *name;
102 unsigned int cbl;
103 int spd_limit;
104 unsigned long xfer_mask;
105 unsigned int horkage_on;
106 unsigned int horkage_off;
107 };
108
109 struct ata_force_ent {
110 int port;
111 int device;
112 struct ata_force_param param;
113 };
114
115 static struct ata_force_ent *ata_force_tbl;
116 static int ata_force_tbl_size;
117
118 static char ata_force_param_buf[PAGE_SIZE] __initdata;
119 /* param_buf is thrown away after initialization, disallow read */
120 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
121 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
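/*
 * Illustration (example strings only, syntax per the ata_force_* helpers
 * below and Documentation/kernel-parameters.txt):
 *
 *	libata.force=1:40c		treat port 1's cable as 40-wire
 *	libata.force=1.00:udma4		cap port 1, device 0 at UDMA/66
 *	libata.force=1:40c,1.00:udma4	combine several forces at once
 */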
122
123 int atapi_enabled = 1;
124 module_param(atapi_enabled, int, 0444);
125 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
126
127 static int atapi_dmadir = 0;
128 module_param(atapi_dmadir, int, 0444);
129 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
130
131 int atapi_passthru16 = 1;
132 module_param(atapi_passthru16, int, 0444);
133 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
134
135 int libata_fua = 0;
136 module_param_named(fua, libata_fua, int, 0444);
137 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
138
139 static int ata_ignore_hpa;
140 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
141 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
142
143 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
144 module_param_named(dma, libata_dma_mask, int, 0444);
145 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
146
147 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / 1000;
148 module_param(ata_probe_timeout, int, 0444);
149 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
150
151 int libata_noacpi = 0;
152 module_param_named(noacpi, libata_noacpi, int, 0444);
153 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
154
155 int libata_allow_tpm = 0;
156 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
157 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
158
159 MODULE_AUTHOR("Jeff Garzik");
160 MODULE_DESCRIPTION("Library module for ATA devices");
161 MODULE_LICENSE("GPL");
162 MODULE_VERSION(DRV_VERSION);
163
164
165 /**
166 * ata_force_cbl - force cable type according to libata.force
167 * @ap: ATA port of interest
168 *
169 * Force cable type according to libata.force and whine about it.
170 * The last entry which has a matching port number is used, so it
171 * can be specified as part of device force parameters. For
172 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
173 * same effect.
174 *
175 * LOCKING:
176 * EH context.
177 */
178 void ata_force_cbl(struct ata_port *ap)
179 {
180 int i;
181
182 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
183 const struct ata_force_ent *fe = &ata_force_tbl[i];
184
185 if (fe->port != -1 && fe->port != ap->print_id)
186 continue;
187
188 if (fe->param.cbl == ATA_CBL_NONE)
189 continue;
190
191 ap->cbl = fe->param.cbl;
192 ata_port_printk(ap, KERN_NOTICE,
193 "FORCE: cable set to %s\n", fe->param.name);
194 return;
195 }
196 }
197
198 /**
199 * ata_force_spd_limit - force SATA spd limit according to libata.force
200 * @link: ATA link of interest
201 *
202 * Force SATA spd limit according to libata.force and whine about
203 * it. When only the port part is specified (e.g. 1:), the limit
204 * applies to all links connected to both the host link and all
205 * fan-out ports connected via PMP. If the device part is
206 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
207 * link not the host link. Device number 15 always points to the
208 * host link whether PMP is attached or not.
209 *
210 * LOCKING:
211 * EH context.
212 */
213 static void ata_force_spd_limit(struct ata_link *link)
214 {
215 int linkno, i;
216
217 if (ata_is_host_link(link))
218 linkno = 15;
219 else
220 linkno = link->pmp;
221
222 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
223 const struct ata_force_ent *fe = &ata_force_tbl[i];
224
225 if (fe->port != -1 && fe->port != link->ap->print_id)
226 continue;
227
228 if (fe->device != -1 && fe->device != linkno)
229 continue;
230
231 if (!fe->param.spd_limit)
232 continue;
233
234 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
235 ata_link_printk(link, KERN_NOTICE,
236 "FORCE: PHY spd limit set to %s\n", fe->param.name);
237 return;
238 }
239 }
240
241 /**
242 * ata_force_xfermask - force xfermask according to libata.force
243 * @dev: ATA device of interest
244 *
245 * Force xfer_mask according to libata.force and whine about it.
246 * For consistency with link selection, device number 15 selects
247 * the first device connected to the host link.
248 *
249 * LOCKING:
250 * EH context.
251 */
252 static void ata_force_xfermask(struct ata_device *dev)
253 {
254 int devno = dev->link->pmp + dev->devno;
255 int alt_devno = devno;
256 int i;
257
258 /* allow n.15 for the first device attached to host port */
259 if (ata_is_host_link(dev->link) && devno == 0)
260 alt_devno = 15;
261
262 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
263 const struct ata_force_ent *fe = &ata_force_tbl[i];
264 unsigned long pio_mask, mwdma_mask, udma_mask;
265
266 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
267 continue;
268
269 if (fe->device != -1 && fe->device != devno &&
270 fe->device != alt_devno)
271 continue;
272
273 if (!fe->param.xfer_mask)
274 continue;
275
276 ata_unpack_xfermask(fe->param.xfer_mask,
277 &pio_mask, &mwdma_mask, &udma_mask);
278 if (udma_mask)
279 dev->udma_mask = udma_mask;
280 else if (mwdma_mask) {
281 dev->udma_mask = 0;
282 dev->mwdma_mask = mwdma_mask;
283 } else {
284 dev->udma_mask = 0;
285 dev->mwdma_mask = 0;
286 dev->pio_mask = pio_mask;
287 }
288
289 ata_dev_printk(dev, KERN_NOTICE,
290 "FORCE: xfer_mask set to %s\n", fe->param.name);
291 return;
292 }
293 }
294
295 /**
296 * ata_force_horkage - force horkage according to libata.force
297 * @dev: ATA device of interest
298 *
299 * Force horkage according to libata.force and whine about it.
300 * For consistency with link selection, device number 15 selects
301 * the first device connected to the host link.
302 *
303 * LOCKING:
304 * EH context.
305 */
306 static void ata_force_horkage(struct ata_device *dev)
307 {
308 int devno = dev->link->pmp + dev->devno;
309 int alt_devno = devno;
310 int i;
311
312 /* allow n.15 for the first device attached to host port */
313 if (ata_is_host_link(dev->link) && devno == 0)
314 alt_devno = 15;
315
316 for (i = 0; i < ata_force_tbl_size; i++) {
317 const struct ata_force_ent *fe = &ata_force_tbl[i];
318
319 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
320 continue;
321
322 if (fe->device != -1 && fe->device != devno &&
323 fe->device != alt_devno)
324 continue;
325
326 if (!(~dev->horkage & fe->param.horkage_on) &&
327 !(dev->horkage & fe->param.horkage_off))
328 continue;
329
330 dev->horkage |= fe->param.horkage_on;
331 dev->horkage &= ~fe->param.horkage_off;
332
333 ata_dev_printk(dev, KERN_NOTICE,
334 "FORCE: horkage modified (%s)\n", fe->param.name);
335 }
336 }
337
338 /**
339 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
340 * @opcode: SCSI opcode
341 *
342 * Determine ATAPI command type from @opcode.
343 *
344 * LOCKING:
345 * None.
346 *
347 * RETURNS:
348 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
349 */
350 int atapi_cmd_type(u8 opcode)
351 {
352 switch (opcode) {
353 case GPCMD_READ_10:
354 case GPCMD_READ_12:
355 return ATAPI_READ;
356
357 case GPCMD_WRITE_10:
358 case GPCMD_WRITE_12:
359 case GPCMD_WRITE_AND_VERIFY_10:
360 return ATAPI_WRITE;
361
362 case GPCMD_READ_CD:
363 case GPCMD_READ_CD_MSF:
364 return ATAPI_READ_CD;
365
366 case ATA_16:
367 case ATA_12:
368 if (atapi_passthru16)
369 return ATAPI_PASS_THRU;
370 /* fall thru */
371 default:
372 return ATAPI_MISC;
373 }
374 }
375
376 /**
377 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
378 * @tf: Taskfile to convert
379 * @pmp: Port multiplier port
380 * @is_cmd: This FIS is for command
381 * @fis: Buffer into which data will be output
382 *
383 * Converts a standard ATA taskfile to a Serial ATA
384 * FIS structure (Register - Host to Device).
385 *
386 * LOCKING:
387 * Inherited from caller.
388 */
389 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
390 {
391 fis[0] = 0x27; /* Register - Host to Device FIS */
392 fis[1] = pmp & 0xf; /* Port multiplier number*/
393 if (is_cmd)
394 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
395
396 fis[2] = tf->command;
397 fis[3] = tf->feature;
398
399 fis[4] = tf->lbal;
400 fis[5] = tf->lbam;
401 fis[6] = tf->lbah;
402 fis[7] = tf->device;
403
404 fis[8] = tf->hob_lbal;
405 fis[9] = tf->hob_lbam;
406 fis[10] = tf->hob_lbah;
407 fis[11] = tf->hob_feature;
408
409 fis[12] = tf->nsect;
410 fis[13] = tf->hob_nsect;
411 fis[14] = 0;
412 fis[15] = tf->ctl;
413
414 fis[16] = 0;
415 fis[17] = 0;
416 fis[18] = 0;
417 fis[19] = 0;
418 }
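/*
 * Illustration - a minimal usage sketch (hypothetical caller state); the
 * 20-byte buffer then holds a Register - Host to Device FIS with the
 * command bit set, ready for the controller's transmit structures:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 */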
419
420 /**
421 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
422 * @fis: Buffer from which data will be input
423 * @tf: Taskfile to output
424 *
425 * Converts a serial ATA FIS structure to a standard ATA taskfile.
426 *
427 * LOCKING:
428 * Inherited from caller.
429 */
430
431 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
432 {
433 tf->command = fis[2]; /* status */
434 tf->feature = fis[3]; /* error */
435
436 tf->lbal = fis[4];
437 tf->lbam = fis[5];
438 tf->lbah = fis[6];
439 tf->device = fis[7];
440
441 tf->hob_lbal = fis[8];
442 tf->hob_lbam = fis[9];
443 tf->hob_lbah = fis[10];
444
445 tf->nsect = fis[12];
446 tf->hob_nsect = fis[13];
447 }
448
449 static const u8 ata_rw_cmds[] = {
450 /* pio multi */
451 ATA_CMD_READ_MULTI,
452 ATA_CMD_WRITE_MULTI,
453 ATA_CMD_READ_MULTI_EXT,
454 ATA_CMD_WRITE_MULTI_EXT,
455 0,
456 0,
457 0,
458 ATA_CMD_WRITE_MULTI_FUA_EXT,
459 /* pio */
460 ATA_CMD_PIO_READ,
461 ATA_CMD_PIO_WRITE,
462 ATA_CMD_PIO_READ_EXT,
463 ATA_CMD_PIO_WRITE_EXT,
464 0,
465 0,
466 0,
467 0,
468 /* dma */
469 ATA_CMD_READ,
470 ATA_CMD_WRITE,
471 ATA_CMD_READ_EXT,
472 ATA_CMD_WRITE_EXT,
473 0,
474 0,
475 0,
476 ATA_CMD_WRITE_FUA_EXT
477 };
478
479 /**
480 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
481 * @tf: command to examine and configure
482 * @dev: device tf belongs to
483 *
484 * Examine the device configuration and tf->flags to calculate
485 * the proper read/write commands and protocol to use.
486 *
487 * LOCKING:
488 * caller.
489 */
490 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
491 {
492 u8 cmd;
493
494 int index, fua, lba48, write;
495
496 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
497 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
498 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
499
500 if (dev->flags & ATA_DFLAG_PIO) {
501 tf->protocol = ATA_PROT_PIO;
502 index = dev->multi_count ? 0 : 8;
503 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
504 /* Unable to use DMA due to host limitation */
505 tf->protocol = ATA_PROT_PIO;
506 index = dev->multi_count ? 0 : 8;
507 } else {
508 tf->protocol = ATA_PROT_DMA;
509 index = 16;
510 }
511
512 cmd = ata_rw_cmds[index + fua + lba48 + write];
513 if (cmd) {
514 tf->command = cmd;
515 return 0;
516 }
517 return -1;
518 }
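/*
 * Worked example of the ata_rw_cmds[] indexing above, where
 * index = base (0 pio-multi / 8 pio / 16 dma) + 4 (FUA) + 2 (LBA48)
 * + 1 (write): a DMA LBA48 FUA write is 16 + 4 + 2 + 1 = 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT, while a FUA write without LBA48 lands on a
 * zero entry (16 + 4 + 1 = 21) and is correctly rejected with -1.
 */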
519
520 /**
521 * ata_tf_read_block - Read block address from ATA taskfile
522 * @tf: ATA taskfile of interest
523 * @dev: ATA device @tf belongs to
524 *
525 * LOCKING:
526 * None.
527 *
528 * Read block address from @tf. This function can handle all
529 * three address formats - LBA, LBA48 and CHS. tf->protocol and
530 * flags select the address format to use.
531 *
532 * RETURNS:
533 * Block address read from @tf.
534 */
535 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
536 {
537 u64 block = 0;
538
539 if (tf->flags & ATA_TFLAG_LBA) {
540 if (tf->flags & ATA_TFLAG_LBA48) {
541 block |= (u64)tf->hob_lbah << 40;
542 block |= (u64)tf->hob_lbam << 32;
543 block |= tf->hob_lbal << 24;
544 } else
545 block |= (tf->device & 0xf) << 24;
546
547 block |= tf->lbah << 16;
548 block |= tf->lbam << 8;
549 block |= tf->lbal;
550 } else {
551 u32 cyl, head, sect;
552
553 cyl = tf->lbam | (tf->lbah << 8);
554 head = tf->device & 0xf;
555 sect = tf->lbal;
556
557 block = (cyl * dev->heads + head) * dev->sectors + sect;
558 }
559
560 return block;
561 }
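/*
 * Numeric example for the CHS branch above, assuming a (hypothetical)
 * geometry of 16 heads and 63 sectors per track: cyl=2, head=3, sect=10
 * gives (2*16 + 3)*63 + 10 = 2215.
 */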
562
563 /**
564 * ata_build_rw_tf - Build ATA taskfile for given read/write request
565 * @tf: Target ATA taskfile
566 * @dev: ATA device @tf belongs to
567 * @block: Block address
568 * @n_block: Number of blocks
569 * @tf_flags: RW/FUA etc...
570 * @tag: tag
571 *
572 * LOCKING:
573 * None.
574 *
575 * Build ATA taskfile @tf for read/write request described by
576 * @block, @n_block, @tf_flags and @tag on @dev.
577 *
578 * RETURNS:
579 *
580 * 0 on success, -ERANGE if the request is too large for @dev,
581 * -EINVAL if the request is invalid.
582 */
583 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
584 u64 block, u32 n_block, unsigned int tf_flags,
585 unsigned int tag)
586 {
587 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
588 tf->flags |= tf_flags;
589
590 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
591 /* yay, NCQ */
592 if (!lba_48_ok(block, n_block))
593 return -ERANGE;
594
595 tf->protocol = ATA_PROT_NCQ;
596 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
597
598 if (tf->flags & ATA_TFLAG_WRITE)
599 tf->command = ATA_CMD_FPDMA_WRITE;
600 else
601 tf->command = ATA_CMD_FPDMA_READ;
602
603 tf->nsect = tag << 3;
604 tf->hob_feature = (n_block >> 8) & 0xff;
605 tf->feature = n_block & 0xff;
606
607 tf->hob_lbah = (block >> 40) & 0xff;
608 tf->hob_lbam = (block >> 32) & 0xff;
609 tf->hob_lbal = (block >> 24) & 0xff;
610 tf->lbah = (block >> 16) & 0xff;
611 tf->lbam = (block >> 8) & 0xff;
612 tf->lbal = block & 0xff;
613
614 tf->device = 1 << 6;
615 if (tf->flags & ATA_TFLAG_FUA)
616 tf->device |= 1 << 7;
617 } else if (dev->flags & ATA_DFLAG_LBA) {
618 tf->flags |= ATA_TFLAG_LBA;
619
620 if (lba_28_ok(block, n_block)) {
621 /* use LBA28 */
622 tf->device |= (block >> 24) & 0xf;
623 } else if (lba_48_ok(block, n_block)) {
624 if (!(dev->flags & ATA_DFLAG_LBA48))
625 return -ERANGE;
626
627 /* use LBA48 */
628 tf->flags |= ATA_TFLAG_LBA48;
629
630 tf->hob_nsect = (n_block >> 8) & 0xff;
631
632 tf->hob_lbah = (block >> 40) & 0xff;
633 tf->hob_lbam = (block >> 32) & 0xff;
634 tf->hob_lbal = (block >> 24) & 0xff;
635 } else
636 /* request too large even for LBA48 */
637 return -ERANGE;
638
639 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
640 return -EINVAL;
641
642 tf->nsect = n_block & 0xff;
643
644 tf->lbah = (block >> 16) & 0xff;
645 tf->lbam = (block >> 8) & 0xff;
646 tf->lbal = block & 0xff;
647
648 tf->device |= ATA_LBA;
649 } else {
650 /* CHS */
651 u32 sect, head, cyl, track;
652
653 /* The request -may- be too large for CHS addressing. */
654 if (!lba_28_ok(block, n_block))
655 return -ERANGE;
656
657 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
658 return -EINVAL;
659
660 /* Convert LBA to CHS */
661 track = (u32)block / dev->sectors;
662 cyl = track / dev->heads;
663 head = track % dev->heads;
664 sect = (u32)block % dev->sectors + 1;
665
666 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
667 (u32)block, track, cyl, head, sect);
668
669 /* Check whether the converted CHS can fit.
670 Cylinder: 0-65535
671 Head: 0-15
672 Sector: 1-255*/
673 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
674 return -ERANGE;
675
676 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
677 tf->lbal = sect;
678 tf->lbam = cyl;
679 tf->lbah = cyl >> 8;
680 tf->device |= head;
681 }
682
683 return 0;
684 }
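/*
 * Note how the fields move in the NCQ branch above compared with plain
 * LBA48: the queue tag rides in SECTOR COUNT bits 7:3 (tag << 3) and the
 * block count shifts into the FEATURE/FEATURE-EXP pair, whereas LBA48
 * keeps the count in nsect/hob_nsect.
 */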
685
686 /**
687 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
688 * @pio_mask: pio_mask
689 * @mwdma_mask: mwdma_mask
690 * @udma_mask: udma_mask
691 *
692 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
693 * unsigned long xfer_mask.
694 *
695 * LOCKING:
696 * None.
697 *
698 * RETURNS:
699 * Packed xfer_mask.
700 */
701 unsigned long ata_pack_xfermask(unsigned long pio_mask,
702 unsigned long mwdma_mask,
703 unsigned long udma_mask)
704 {
705 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
706 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
707 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
708 }
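/*
 * Example: a device supporting PIO0-4 and UDMA0-5 packs as
 * (0x1f << ATA_SHIFT_PIO) | (0x3f << ATA_SHIFT_UDMA);
 * ata_unpack_xfermask() below recovers the individual masks from
 * that single value.
 */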
709
710 /**
711 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
712 * @xfer_mask: xfer_mask to unpack
713 * @pio_mask: resulting pio_mask
714 * @mwdma_mask: resulting mwdma_mask
715 * @udma_mask: resulting udma_mask
716 *
717 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
718 * Any NULL destination masks will be ignored.
719 */
720 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
721 unsigned long *mwdma_mask, unsigned long *udma_mask)
722 {
723 if (pio_mask)
724 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
725 if (mwdma_mask)
726 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
727 if (udma_mask)
728 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
729 }
730
731 static const struct ata_xfer_ent {
732 int shift, bits;
733 u8 base;
734 } ata_xfer_tbl[] = {
735 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
736 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
737 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
738 { -1, },
739 };
740
741 /**
742 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
743 * @xfer_mask: xfer_mask of interest
744 *
745 * Return matching XFER_* value for @xfer_mask. Only the highest
746 * bit of @xfer_mask is considered.
747 *
748 * LOCKING:
749 * None.
750 *
751 * RETURNS:
752 * Matching XFER_* value, 0xff if no match found.
753 */
754 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
755 {
756 int highbit = fls(xfer_mask) - 1;
757 const struct ata_xfer_ent *ent;
758
759 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
760 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
761 return ent->base + highbit - ent->shift;
762 return 0xff;
763 }
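/*
 * Example: for a mask whose highest set bit is ATA_SHIFT_UDMA + 5, the
 * UDMA entry of ata_xfer_tbl matches and the result is XFER_UDMA_0 + 5,
 * i.e. XFER_UDMA_5 (rendered as "UDMA/100" by ata_mode_string()).
 */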
764
765 /**
766 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
767 * @xfer_mode: XFER_* of interest
768 *
769 * Return matching xfer_mask for @xfer_mode.
770 *
771 * LOCKING:
772 * None.
773 *
774 * RETURNS:
775 * Matching xfer_mask, 0 if no match found.
776 */
777 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
778 {
779 const struct ata_xfer_ent *ent;
780
781 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
782 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
783 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
784 & ~((1 << ent->shift) - 1);
785 return 0;
786 }
787
788 /**
789 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
790 * @xfer_mode: XFER_* of interest
791 *
792 * Return matching xfer_shift for @xfer_mode.
793 *
794 * LOCKING:
795 * None.
796 *
797 * RETURNS:
798 * Matching xfer_shift, -1 if no match found.
799 */
800 int ata_xfer_mode2shift(unsigned long xfer_mode)
801 {
802 const struct ata_xfer_ent *ent;
803
804 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
805 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
806 return ent->shift;
807 return -1;
808 }
809
810 /**
811 * ata_mode_string - convert xfer_mask to string
812 * @xfer_mask: mask of bits supported; only highest bit counts.
813 *
814 * Determine string which represents the highest speed
815 * (highest bit in @xfer_mask).
816 *
817 * LOCKING:
818 * None.
819 *
820 * RETURNS:
821 * Constant C string representing highest speed listed in
822 * @xfer_mask, or the constant C string "<n/a>".
823 */
824 const char *ata_mode_string(unsigned long xfer_mask)
825 {
826 static const char * const xfer_mode_str[] = {
827 "PIO0",
828 "PIO1",
829 "PIO2",
830 "PIO3",
831 "PIO4",
832 "PIO5",
833 "PIO6",
834 "MWDMA0",
835 "MWDMA1",
836 "MWDMA2",
837 "MWDMA3",
838 "MWDMA4",
839 "UDMA/16",
840 "UDMA/25",
841 "UDMA/33",
842 "UDMA/44",
843 "UDMA/66",
844 "UDMA/100",
845 "UDMA/133",
846 "UDMA7",
847 };
848 int highbit;
849
850 highbit = fls(xfer_mask) - 1;
851 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
852 return xfer_mode_str[highbit];
853 return "<n/a>";
854 }
855
856 static const char *sata_spd_string(unsigned int spd)
857 {
858 static const char * const spd_str[] = {
859 "1.5 Gbps",
860 "3.0 Gbps",
861 };
862
863 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
864 return "<unknown>";
865 return spd_str[spd - 1];
866 }
867
868 void ata_dev_disable(struct ata_device *dev)
869 {
870 if (ata_dev_enabled(dev)) {
871 if (ata_msg_drv(dev->link->ap))
872 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
873 ata_acpi_on_disable(dev);
874 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
875 ATA_DNXFER_QUIET);
876 dev->class++;
877 }
878 }
879
880 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
881 {
882 struct ata_link *link = dev->link;
883 struct ata_port *ap = link->ap;
884 u32 scontrol;
885 unsigned int err_mask;
886 int rc;
887
888 /*
889 * disallow DIPM for drivers which haven't set
890 * ATA_FLAG_IPM. This is because when DIPM is enabled,
891 * phy ready will be set in the interrupt status on
892 * state changes, which will cause some drivers to
893 * think there are errors - additionally drivers will
894 * need to disable hot plug.
895 */
896 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
897 ap->pm_policy = NOT_AVAILABLE;
898 return -EINVAL;
899 }
900
901 /*
902 * For DIPM, we will only enable it for the
903 * min_power setting.
904 *
905 * Why? Because disks are too stupid to know that,
906 * if the host rejects a request to go to SLUMBER,
907 * they should retry at PARTIAL; instead they
908 * just give up. So, for medium_power to
909 * work at all, we need to only allow HIPM.
910 */
911 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
912 if (rc)
913 return rc;
914
915 switch (policy) {
916 case MIN_POWER:
917 /* no restrictions on IPM transitions */
918 scontrol &= ~(0x3 << 8);
919 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
920 if (rc)
921 return rc;
922
923 /* enable DIPM */
924 if (dev->flags & ATA_DFLAG_DIPM)
925 err_mask = ata_dev_set_feature(dev,
926 SETFEATURES_SATA_ENABLE, SATA_DIPM);
927 break;
928 case MEDIUM_POWER:
929 /* allow IPM to PARTIAL */
930 scontrol &= ~(0x1 << 8);
931 scontrol |= (0x2 << 8);
932 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
933 if (rc)
934 return rc;
935
936 /*
937 * we don't have to disable DIPM since IPM flags
938 * disallow transitions to SLUMBER, which effectively
939 * disable DIPM if it does not support PARTIAL
940 */
941 break;
942 case NOT_AVAILABLE:
943 case MAX_PERFORMANCE:
944 /* disable all IPM transitions */
945 scontrol |= (0x3 << 8);
946 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
947 if (rc)
948 return rc;
949
950 /*
951 * we don't have to disable DIPM since IPM flags
952 * disallow all transitions which effectively
953 * disable DIPM anyway.
954 */
955 break;
956 }
957
958 /* FIXME: handle SET FEATURES failure */
959 (void) err_mask;
960
961 return 0;
962 }
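/*
 * The (0x3 << 8) masks above manipulate the IPM field of SControl
 * (bits 11:8): value 0x1 forbids transitions to PARTIAL, 0x2 forbids
 * transitions to SLUMBER. Hence MEDIUM_POWER clears 0x1 and sets 0x2
 * (PARTIAL only), while MAX_PERFORMANCE sets both (no IPM at all).
 */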
963
964 /**
965 * ata_dev_enable_pm - enable SATA interface power management
966 * @dev: device to enable power management
967 * @policy: the link power management policy
968 *
969 * Enable SATA Interface power management. This will enable
970 * Device Interface Power Management (DIPM) for min_power
971 * policy, and then call driver specific callbacks for
972 * enabling Host Initiated Power management.
973 *
974 * Locking: Caller.
975 * Returns: void; on failure @ap->pm_policy falls back to MAX_PERFORMANCE.
976 */
977 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
978 {
979 int rc = 0;
980 struct ata_port *ap = dev->link->ap;
981
982 /* set HIPM first, then DIPM */
983 if (ap->ops->enable_pm)
984 rc = ap->ops->enable_pm(ap, policy);
985 if (rc)
986 goto enable_pm_out;
987 rc = ata_dev_set_dipm(dev, policy);
988
989 enable_pm_out:
990 if (rc)
991 ap->pm_policy = MAX_PERFORMANCE;
992 else
993 ap->pm_policy = policy;
994 return /* rc */; /* hopefully we can use 'rc' eventually */
995 }
996
997 #ifdef CONFIG_PM
998 /**
999 * ata_dev_disable_pm - disable SATA interface power management
1000 * @dev: device to disable power management
1001 *
1002 * Disable SATA Interface power management. This will disable
1003 * Device Interface Power Management (DIPM) without changing
1004 * policy, call driver specific callbacks for disabling Host
1005 * Initiated Power management.
1006 *
1007 * Locking: Caller.
1008 * Returns: void
1009 */
1010 static void ata_dev_disable_pm(struct ata_device *dev)
1011 {
1012 struct ata_port *ap = dev->link->ap;
1013
1014 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1015 if (ap->ops->disable_pm)
1016 ap->ops->disable_pm(ap);
1017 }
1018 #endif /* CONFIG_PM */
1019
1020 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1021 {
1022 ap->pm_policy = policy;
1023 ap->link.eh_info.action |= ATA_EH_LPM;
1024 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1025 ata_port_schedule_eh(ap);
1026 }
1027
1028 #ifdef CONFIG_PM
1029 static void ata_lpm_enable(struct ata_host *host)
1030 {
1031 struct ata_link *link;
1032 struct ata_port *ap;
1033 struct ata_device *dev;
1034 int i;
1035
1036 for (i = 0; i < host->n_ports; i++) {
1037 ap = host->ports[i];
1038 ata_port_for_each_link(link, ap) {
1039 ata_link_for_each_dev(dev, link)
1040 ata_dev_disable_pm(dev);
1041 }
1042 }
1043 }
1044
1045 static void ata_lpm_disable(struct ata_host *host)
1046 {
1047 int i;
1048
1049 for (i = 0; i < host->n_ports; i++) {
1050 struct ata_port *ap = host->ports[i];
1051 ata_lpm_schedule(ap, ap->pm_policy);
1052 }
1053 }
1054 #endif /* CONFIG_PM */
1055
1056 /**
1057 * ata_dev_classify - determine device type based on ATA-spec signature
1058 * @tf: ATA taskfile register set for device to be identified
1059 *
1060 * Determine from taskfile register contents whether a device is
1061 * ATA or ATAPI, as per "Signature and persistence" section
1062 * of ATA/PI spec (volume 1, sect 5.14).
1063 *
1064 * LOCKING:
1065 * None.
1066 *
1067 * RETURNS:
1068 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1069 * %ATA_DEV_UNKNOWN in the event of failure.
1070 */
1071 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1072 {
1073 /* Apple's open source Darwin code hints that some devices only
1074 * put a proper signature into the LBA mid/high registers,
1075 * so we check only those. It's sufficient for uniqueness.
1076 *
1077 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1078 * signatures for ATA and ATAPI devices attached on SerialATA,
1079 * 0x3c/0xc3 and 0x69/0x96 respectively. However, the SerialATA
1080 * spec has never mentioned using different signatures
1081 * for ATA/ATAPI devices. Then, the Serial ATA II: Port
1082 * Multiplier specification began to use 0x69/0x96 to identify
1083 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1084 * ATA/ATAPI-7 soon dropped its descriptions of 0x3c/0xc3 and
1085 * 0x69/0x96, describing them as reserved for
1086 * SerialATA.
1087 *
1088 * We follow the current spec and consider that 0x69/0x96
1089 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1090 */
1091 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1092 DPRINTK("found ATA device by sig\n");
1093 return ATA_DEV_ATA;
1094 }
1095
1096 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1097 DPRINTK("found ATAPI device by sig\n");
1098 return ATA_DEV_ATAPI;
1099 }
1100
1101 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1102 DPRINTK("found PMP device by sig\n");
1103 return ATA_DEV_PMP;
1104 }
1105
1106 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1107 printk(KERN_INFO "ata: SEMB device ignored\n");
1108 return ATA_DEV_SEMB_UNSUP; /* not yet */
1109 }
1110
1111 DPRINTK("unknown device\n");
1112 return ATA_DEV_UNKNOWN;
1113 }
1114
1115 /**
1116 * ata_id_string - Convert IDENTIFY DEVICE page into string
1117 * @id: IDENTIFY DEVICE results we will examine
1118 * @s: string into which data is output
1119 * @ofs: offset into identify device page
1120 * @len: length of string to return. must be an even number.
1121 *
1122 * The strings in the IDENTIFY DEVICE page are broken up into
1123 * 16-bit chunks. Run through the string, and output each
1124 * 8-bit chunk linearly, regardless of platform.
1125 *
1126 * LOCKING:
1127 * caller.
1128 */
1129
1130 void ata_id_string(const u16 *id, unsigned char *s,
1131 unsigned int ofs, unsigned int len)
1132 {
1133 unsigned int c;
1134
1135 while (len > 0) {
1136 c = id[ofs] >> 8;
1137 *s = c;
1138 s++;
1139
1140 c = id[ofs] & 0xff;
1141 *s = c;
1142 s++;
1143
1144 ofs++;
1145 len -= 2;
1146 }
1147 }
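/*
 * Example: each IDENTIFY word stores two ASCII characters with the first
 * character in the high byte, e.g. id[ofs] == 0x3132 emits '1' then '2' -
 * which is why the high byte is copied out first above.
 */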
1148
1149 /**
1150 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1151 * @id: IDENTIFY DEVICE results we will examine
1152 * @s: string into which data is output
1153 * @ofs: offset into identify device page
1154 * @len: length of string to return. must be an odd number.
1155 *
1156 * This function is identical to ata_id_string except that it
1157 * trims trailing spaces and terminates the resulting string with
1158 * null. @len must be the actual maximum length (even number) + 1.
1159 *
1160 * LOCKING:
1161 * caller.
1162 */
1163 void ata_id_c_string(const u16 *id, unsigned char *s,
1164 unsigned int ofs, unsigned int len)
1165 {
1166 unsigned char *p;
1167
1168 WARN_ON(!(len & 1));
1169
1170 ata_id_string(id, s, ofs, len - 1);
1171
1172 p = s + strnlen(s, len - 1);
1173 while (p > s && p[-1] == ' ')
1174 p--;
1175 *p = '\0';
1176 }
1177
1178 static u64 ata_id_n_sectors(const u16 *id)
1179 {
1180 if (ata_id_has_lba(id)) {
1181 if (ata_id_has_lba48(id))
1182 return ata_id_u64(id, 100);
1183 else
1184 return ata_id_u32(id, 60);
1185 } else {
1186 if (ata_id_current_chs_valid(id))
1187 return ata_id_u32(id, 57);
1188 else
1189 return id[1] * id[3] * id[6];
1190 }
1191 }
1192
1193 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1194 {
1195 u64 sectors = 0;
1196
1197 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1198 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1199 sectors |= (tf->hob_lbal & 0xff) << 24;
1200 sectors |= (tf->lbah & 0xff) << 16;
1201 sectors |= (tf->lbam & 0xff) << 8;
1202 sectors |= (tf->lbal & 0xff);
1203
1204 return sectors;
1205 }
1206
1207 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1208 {
1209 u64 sectors = 0;
1210
1211 sectors |= (tf->device & 0x0f) << 24;
1212 sectors |= (tf->lbah & 0xff) << 16;
1213 sectors |= (tf->lbam & 0xff) << 8;
1214 sectors |= (tf->lbal & 0xff);
1215
1216 return sectors;
1217 }
1218
1219 /**
1220 * ata_read_native_max_address - Read native max address
1221 * @dev: target device
1222 * @max_sectors: out parameter for the result native max address
1223 *
1224 * Perform an LBA48 or LBA28 native size query upon the device in
1225 * question.
1226 *
1227 * RETURNS:
1228 * 0 on success, -EACCES if command is aborted by the drive.
1229 * -EIO on other errors.
1230 */
1231 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1232 {
1233 unsigned int err_mask;
1234 struct ata_taskfile tf;
1235 int lba48 = ata_id_has_lba48(dev->id);
1236
1237 ata_tf_init(dev, &tf);
1238
1239 /* always clear all address registers */
1240 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1241
1242 if (lba48) {
1243 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1244 tf.flags |= ATA_TFLAG_LBA48;
1245 } else
1246 tf.command = ATA_CMD_READ_NATIVE_MAX;
1247
1248 tf.protocol |= ATA_PROT_NODATA;
1249 tf.device |= ATA_LBA;
1250
1251 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1252 if (err_mask) {
1253 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1254 "max address (err_mask=0x%x)\n", err_mask);
1255 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1256 return -EACCES;
1257 return -EIO;
1258 }
1259
1260 if (lba48)
1261 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1262 else
1263 *max_sectors = ata_tf_to_lba(&tf) + 1;
1264 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1265 (*max_sectors)--;
1266 return 0;
1267 }
1268
1269 /**
1270 * ata_set_max_sectors - Set max sectors
1271 * @dev: target device
1272 * @new_sectors: new max sectors value to set for the device
1273 *
1274 * Set max sectors of @dev to @new_sectors.
1275 *
1276 * RETURNS:
1277 * 0 on success, -EACCES if command is aborted or denied (due to
1278 * previous non-volatile SET_MAX) by the drive. -EIO on other
1279 * errors.
1280 */
1281 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1282 {
1283 unsigned int err_mask;
1284 struct ata_taskfile tf;
1285 int lba48 = ata_id_has_lba48(dev->id);
1286
1287 new_sectors--;
1288
1289 ata_tf_init(dev, &tf);
1290
1291 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1292
1293 if (lba48) {
1294 tf.command = ATA_CMD_SET_MAX_EXT;
1295 tf.flags |= ATA_TFLAG_LBA48;
1296
1297 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1298 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1299 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1300 } else {
1301 tf.command = ATA_CMD_SET_MAX;
1302
1303 tf.device |= (new_sectors >> 24) & 0xf;
1304 }
1305
1306 tf.protocol |= ATA_PROT_NODATA;
1307 tf.device |= ATA_LBA;
1308
1309 tf.lbal = (new_sectors >> 0) & 0xff;
1310 tf.lbam = (new_sectors >> 8) & 0xff;
1311 tf.lbah = (new_sectors >> 16) & 0xff;
1312
1313 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1314 if (err_mask) {
1315 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1316 "max address (err_mask=0x%x)\n", err_mask);
1317 if (err_mask == AC_ERR_DEV &&
1318 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1319 return -EACCES;
1320 return -EIO;
1321 }
1322
1323 return 0;
1324 }
1325
1326 /**
1327 * ata_hpa_resize - Resize a device with an HPA set
1328 * @dev: Device to resize
1329 *
1330 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1331 * it if required to the full size of the media. The caller must check
1332 * the drive has the HPA feature set enabled.
1333 *
1334 * RETURNS:
1335 * 0 on success, -errno on failure.
1336 */
1337 static int ata_hpa_resize(struct ata_device *dev)
1338 {
1339 struct ata_eh_context *ehc = &dev->link->eh_context;
1340 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1341 u64 sectors = ata_id_n_sectors(dev->id);
1342 u64 native_sectors;
1343 int rc;
1344
1345 /* do we need to do it? */
1346 if (dev->class != ATA_DEV_ATA ||
1347 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1348 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1349 return 0;
1350
1351 /* read native max address */
1352 rc = ata_read_native_max_address(dev, &native_sectors);
1353 if (rc) {
1354 /* If device aborted the command or HPA isn't going to
1355 * be unlocked, skip HPA resizing.
1356 */
1357 if (rc == -EACCES || !ata_ignore_hpa) {
1358 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1359 "broken, skipping HPA handling\n");
1360 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1361
1362 /* we can continue if device aborted the command */
1363 if (rc == -EACCES)
1364 rc = 0;
1365 }
1366
1367 return rc;
1368 }
1369
1370 /* nothing to do? */
1371 if (native_sectors <= sectors || !ata_ignore_hpa) {
1372 if (!print_info || native_sectors == sectors)
1373 return 0;
1374
1375 if (native_sectors > sectors)
1376 ata_dev_printk(dev, KERN_INFO,
1377 "HPA detected: current %llu, native %llu\n",
1378 (unsigned long long)sectors,
1379 (unsigned long long)native_sectors);
1380 else if (native_sectors < sectors)
1381 ata_dev_printk(dev, KERN_WARNING,
1382 "native sectors (%llu) is smaller than "
1383 "sectors (%llu)\n",
1384 (unsigned long long)native_sectors,
1385 (unsigned long long)sectors);
1386 return 0;
1387 }
1388
1389 /* let's unlock HPA */
1390 rc = ata_set_max_sectors(dev, native_sectors);
1391 if (rc == -EACCES) {
1392 /* if device aborted the command, skip HPA resizing */
1393 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1394 "(%llu -> %llu), skipping HPA handling\n",
1395 (unsigned long long)sectors,
1396 (unsigned long long)native_sectors);
1397 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1398 return 0;
1399 } else if (rc)
1400 return rc;
1401
1402 /* re-read IDENTIFY data */
1403 rc = ata_dev_reread_id(dev, 0);
1404 if (rc) {
1405 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1406 "data after HPA resizing\n");
1407 return rc;
1408 }
1409
1410 if (print_info) {
1411 u64 new_sectors = ata_id_n_sectors(dev->id);
1412 ata_dev_printk(dev, KERN_INFO,
1413 "HPA unlocked: %llu -> %llu, native %llu\n",
1414 (unsigned long long)sectors,
1415 (unsigned long long)new_sectors,
1416 (unsigned long long)native_sectors);
1417 }
1418
1419 return 0;
1420 }
1421
1422 /**
1423 * ata_dump_id - IDENTIFY DEVICE info debugging output
1424 * @id: IDENTIFY DEVICE page to dump
1425 *
1426 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1427 * page.
1428 *
1429 * LOCKING:
1430 * caller.
1431 */
1432
1433 static inline void ata_dump_id(const u16 *id)
1434 {
1435 DPRINTK("49==0x%04x "
1436 "53==0x%04x "
1437 "63==0x%04x "
1438 "64==0x%04x "
1439 "75==0x%04x \n",
1440 id[49],
1441 id[53],
1442 id[63],
1443 id[64],
1444 id[75]);
1445 DPRINTK("80==0x%04x "
1446 "81==0x%04x "
1447 "82==0x%04x "
1448 "83==0x%04x "
1449 "84==0x%04x \n",
1450 id[80],
1451 id[81],
1452 id[82],
1453 id[83],
1454 id[84]);
1455 DPRINTK("88==0x%04x "
1456 "93==0x%04x\n",
1457 id[88],
1458 id[93]);
1459 }
1460
1461 /**
1462 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1463 * @id: IDENTIFY data to compute xfer mask from
1464 *
1465 * Compute the xfermask for this device. This is not as trivial
1466 * as it seems if we must consider early devices correctly.
1467 *
1468 * FIXME: pre IDE drive timing (do we care ?).
1469 *
1470 * LOCKING:
1471 * None.
1472 *
1473 * RETURNS:
1474 * Computed xfermask
1475 */
1476 unsigned long ata_id_xfermask(const u16 *id)
1477 {
1478 unsigned long pio_mask, mwdma_mask, udma_mask;
1479
1480 /* Usual case. Word 53 indicates word 64 is valid */
1481 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1482 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1483 pio_mask <<= 3;
1484 pio_mask |= 0x7;
1485 } else {
1486 /* If word 64 isn't valid then Word 51 high byte holds
1487 * the PIO timing number for the maximum. Turn it into
1488 * a mask.
1489 */
1490 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1491 if (mode < 5) /* Valid PIO range */
1492 pio_mask = (2 << mode) - 1;
1493 else
1494 pio_mask = 1;
1495
1496 /* But wait.. there's more. Design your standards by
1497 * committee and you too can get a free iordy field to
1498 * process. However, it's the speeds, not the modes, that
1499 * are supported... Note that drivers using the timing API
1500 * will get this right anyway.
1501 */
1502 }
1503
1504 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1505
1506 if (ata_id_is_cfa(id)) {
1507 /*
1508 * Process compact flash extended modes
1509 */
1510 int pio = id[163] & 0x7;
1511 int dma = (id[163] >> 3) & 7;
1512
1513 if (pio)
1514 pio_mask |= (1 << 5);
1515 if (pio > 1)
1516 pio_mask |= (1 << 6);
1517 if (dma)
1518 mwdma_mask |= (1 << 3);
1519 if (dma > 1)
1520 mwdma_mask |= (1 << 4);
1521 }
1522
1523 udma_mask = 0;
1524 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1525 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1526
1527 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1528 }
1529
1530 /**
1531 * ata_pio_queue_task - Queue port_task
1532 * @ap: The ata_port to queue port_task for
1533 * @fn: workqueue function to be scheduled
1534 * @data: data for @fn to use
1535 * @delay: delay time in msecs for workqueue function
1536 *
1537 * Schedule @fn(@data) for execution after @delay msecs using
1538 * port_task. There is one port_task per port and it's the
1539 * user(low level driver)'s responsibility to make sure that only
1540 * one task is active at any given time.
1541 *
1542 * libata core layer takes care of synchronization between
1543 * port_task and EH. ata_pio_queue_task() may be ignored for EH
1544 * synchronization.
1545 *
1546 * LOCKING:
1547 * Inherited from caller.
1548 */
1549 void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
1550 {
1551 ap->port_task_data = data;
1552
1553 /* may fail if ata_port_flush_task() in progress */
1554 queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
1555 }
1556
1557 /**
1558 * ata_port_flush_task - Flush port_task
1559 * @ap: The ata_port to flush port_task for
1560 *
1561 * After this function completes, port_task is guaranteed not to
1562 * be running or scheduled.
1563 *
1564 * LOCKING:
1565 * Kernel thread context (may sleep)
1566 */
1567 void ata_port_flush_task(struct ata_port *ap)
1568 {
1569 DPRINTK("ENTER\n");
1570
1571 cancel_rearming_delayed_work(&ap->port_task);
1572
1573 if (ata_msg_ctl(ap))
1574 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1575 }
1576
1577 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1578 {
1579 struct completion *waiting = qc->private_data;
1580
1581 complete(waiting);
1582 }
1583
1584 /**
1585 * ata_exec_internal_sg - execute libata internal command
1586 * @dev: Device to which the command is sent
1587 * @tf: Taskfile registers for the command and the result
1588 * @cdb: CDB for packet command
1589 * @dma_dir: Data transfer direction of the command
1590 * @sgl: sg list for the data buffer of the command
1591 * @n_elem: Number of sg entries
1592 * @timeout: Timeout in msecs (0 for default)
1593 *
1594 * Executes libata internal command with timeout. @tf contains
1595 * command on entry and result on return. Timeout and error
1596 * conditions are reported via return value. No recovery action
1597 * is taken after a command times out. It's the caller's duty to
1598 * clean up after timeout.
1599 *
1600 * LOCKING:
1601 * None. Should be called with kernel context, might sleep.
1602 *
1603 * RETURNS:
1604 * Zero on success, AC_ERR_* mask on failure
1605 */
1606 unsigned ata_exec_internal_sg(struct ata_device *dev,
1607 struct ata_taskfile *tf, const u8 *cdb,
1608 int dma_dir, struct scatterlist *sgl,
1609 unsigned int n_elem, unsigned long timeout)
1610 {
1611 struct ata_link *link = dev->link;
1612 struct ata_port *ap = link->ap;
1613 u8 command = tf->command;
1614 struct ata_queued_cmd *qc;
1615 unsigned int tag, preempted_tag;
1616 u32 preempted_sactive, preempted_qc_active;
1617 int preempted_nr_active_links;
1618 DECLARE_COMPLETION_ONSTACK(wait);
1619 unsigned long flags;
1620 unsigned int err_mask;
1621 int rc;
1622
1623 spin_lock_irqsave(ap->lock, flags);
1624
1625 /* no internal command while frozen */
1626 if (ap->pflags & ATA_PFLAG_FROZEN) {
1627 spin_unlock_irqrestore(ap->lock, flags);
1628 return AC_ERR_SYSTEM;
1629 }
1630
1631 /* initialize internal qc */
1632
1633 /* XXX: Tag 0 is used for drivers with legacy EH as some
1634 * drivers choke if any other tag is given. This breaks
1635 * ata_tag_internal() test for those drivers. Don't use new
1636 * EH stuff without converting to it.
1637 */
1638 if (ap->ops->error_handler)
1639 tag = ATA_TAG_INTERNAL;
1640 else
1641 tag = 0;
1642
1643 if (test_and_set_bit(tag, &ap->qc_allocated))
1644 BUG();
1645 qc = __ata_qc_from_tag(ap, tag);
1646
1647 qc->tag = tag;
1648 qc->scsicmd = NULL;
1649 qc->ap = ap;
1650 qc->dev = dev;
1651 ata_qc_reinit(qc);
1652
1653 preempted_tag = link->active_tag;
1654 preempted_sactive = link->sactive;
1655 preempted_qc_active = ap->qc_active;
1656 preempted_nr_active_links = ap->nr_active_links;
1657 link->active_tag = ATA_TAG_POISON;
1658 link->sactive = 0;
1659 ap->qc_active = 0;
1660 ap->nr_active_links = 0;
1661
1662 /* prepare & issue qc */
1663 qc->tf = *tf;
1664 if (cdb)
1665 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1666 qc->flags |= ATA_QCFLAG_RESULT_TF;
1667 qc->dma_dir = dma_dir;
1668 if (dma_dir != DMA_NONE) {
1669 unsigned int i, buflen = 0;
1670 struct scatterlist *sg;
1671
1672 for_each_sg(sgl, sg, n_elem, i)
1673 buflen += sg->length;
1674
1675 ata_sg_init(qc, sgl, n_elem);
1676 qc->nbytes = buflen;
1677 }
1678
1679 qc->private_data = &wait;
1680 qc->complete_fn = ata_qc_complete_internal;
1681
1682 ata_qc_issue(qc);
1683
1684 spin_unlock_irqrestore(ap->lock, flags);
1685
1686 if (!timeout)
1687 timeout = ata_probe_timeout * 1000;
1688
1689 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1690
1691 ata_port_flush_task(ap);
1692
1693 if (!rc) {
1694 spin_lock_irqsave(ap->lock, flags);
1695
1696 /* We're racing with irq here. If we lose, the
1697 * following test prevents us from completing the qc
1698 * twice. If we win, the port is frozen and will be
1699 * cleaned up by ->post_internal_cmd().
1700 */
1701 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1702 qc->err_mask |= AC_ERR_TIMEOUT;
1703
1704 if (ap->ops->error_handler)
1705 ata_port_freeze(ap);
1706 else
1707 ata_qc_complete(qc);
1708
1709 if (ata_msg_warn(ap))
1710 ata_dev_printk(dev, KERN_WARNING,
1711 "qc timeout (cmd 0x%x)\n", command);
1712 }
1713
1714 spin_unlock_irqrestore(ap->lock, flags);
1715 }
1716
1717 /* do post_internal_cmd */
1718 if (ap->ops->post_internal_cmd)
1719 ap->ops->post_internal_cmd(qc);
1720
1721 /* perform minimal error analysis */
1722 if (qc->flags & ATA_QCFLAG_FAILED) {
1723 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1724 qc->err_mask |= AC_ERR_DEV;
1725
1726 if (!qc->err_mask)
1727 qc->err_mask |= AC_ERR_OTHER;
1728
1729 if (qc->err_mask & ~AC_ERR_OTHER)
1730 qc->err_mask &= ~AC_ERR_OTHER;
1731 }
1732
1733 /* finish up */
1734 spin_lock_irqsave(ap->lock, flags);
1735
1736 *tf = qc->result_tf;
1737 err_mask = qc->err_mask;
1738
1739 ata_qc_free(qc);
1740 link->active_tag = preempted_tag;
1741 link->sactive = preempted_sactive;
1742 ap->qc_active = preempted_qc_active;
1743 ap->nr_active_links = preempted_nr_active_links;
1744
1745 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1746 * Until those drivers are fixed, we detect the condition
1747 * here, fail the command with AC_ERR_SYSTEM and reenable the
1748 * port.
1749 *
1750 * Note that this doesn't change any behavior as internal
1751 * command failure results in disabling the device in the
1752 * higher layer for LLDDs without new reset/EH callbacks.
1753 *
1754 * Kill the following code as soon as those drivers are fixed.
1755 */
1756 if (ap->flags & ATA_FLAG_DISABLED) {
1757 err_mask |= AC_ERR_SYSTEM;
1758 ata_port_probe(ap);
1759 }
1760
1761 spin_unlock_irqrestore(ap->lock, flags);
1762
1763 return err_mask;
1764 }
1765
1766 /**
1767 * ata_exec_internal - execute libata internal command
1768 * @dev: Device to which the command is sent
1769 * @tf: Taskfile registers for the command and the result
1770 * @cdb: CDB for packet command
1771 * @dma_dir: Data transfer direction of the command
1772 * @buf: Data buffer of the command
1773 * @buflen: Length of data buffer
1774 * @timeout: Timeout in msecs (0 for default)
1775 *
1776 * Wrapper around ata_exec_internal_sg() which takes simple
1777 * buffer instead of sg list.
1778 *
1779 * LOCKING:
1780 * None. Should be called with kernel context, might sleep.
1781 *
1782 * RETURNS:
1783 * Zero on success, AC_ERR_* mask on failure
1784 */
1785 unsigned ata_exec_internal(struct ata_device *dev,
1786 struct ata_taskfile *tf, const u8 *cdb,
1787 int dma_dir, void *buf, unsigned int buflen,
1788 unsigned long timeout)
1789 {
1790 struct scatterlist *psg = NULL, sg;
1791 unsigned int n_elem = 0;
1792
1793 if (dma_dir != DMA_NONE) {
1794 WARN_ON(!buf);
1795 sg_init_one(&sg, buf, buflen);
1796 psg = &sg;
1797 n_elem++;
1798 }
1799
1800 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1801 timeout);
1802 }
1803
1804 /**
1805 * ata_do_simple_cmd - execute simple internal command
1806 * @dev: Device to which the command is sent
1807 * @cmd: Opcode to execute
1808 *
1809 * Execute a 'simple' command, that only consists of the opcode
1810 * 'cmd' itself, without filling any other registers
1811 *
1812 * LOCKING:
1813 * Kernel thread context (may sleep).
1814 *
1815 * RETURNS:
1816 * Zero on success, AC_ERR_* mask on failure
1817 */
1818 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1819 {
1820 struct ata_taskfile tf;
1821
1822 ata_tf_init(dev, &tf);
1823
1824 tf.command = cmd;
1825 tf.flags |= ATA_TFLAG_DEVICE;
1826 tf.protocol = ATA_PROT_NODATA;
1827
1828 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1829 }
1830
1831 /**
1832 * ata_pio_need_iordy - check if iordy needed
1833 * @adev: ATA device
1834 *
1835 * Check if the current speed of the device requires IORDY. Used
1836 * by various controllers for chip configuration.
1837 */
1838
1839 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1840 {
1841 /* Controller doesn't support IORDY. Probably a pointless check
1842 as the caller should know this */
1843 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1844 return 0;
1845 /* PIO3 and higher it is mandatory */
1846 if (adev->pio_mode > XFER_PIO_2)
1847 return 1;
1848 /* We turn it on when possible */
1849 if (ata_id_has_iordy(adev->id))
1850 return 1;
1851 return 0;
1852 }
1853
1854 /**
1855 * ata_pio_mask_no_iordy - Return the non IORDY mask
1856 * @adev: ATA device
1857 *
1858 * Compute the highest mode possible if we are not using iordy. Return
1859 * -1 if no iordy mode is available.
1860 */
1861
1862 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1863 {
1864 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1865 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1866 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1867 /* Is the speed faster than the drive allows non IORDY ? */
1868 if (pio) {
1869 /* This is cycle times not frequency - watch the logic! */
1870 if (pio > 240) /* PIO2 is 240ns per cycle */
1871 return 3 << ATA_SHIFT_PIO;
1872 return 7 << ATA_SHIFT_PIO;
1873 }
1874 }
1875 return 3 << ATA_SHIFT_PIO;
1876 }
1877
1878 /**
1879 * ata_dev_read_id - Read ID data from the specified device
1880 * @dev: target device
1881 * @p_class: pointer to class of the target device (may be changed)
1882 * @flags: ATA_READID_* flags
1883 * @id: buffer to read IDENTIFY data into
1884 *
1885 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1886 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1887 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1888 * for pre-ATA4 drives.
1889 *
1890 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1891 * now we abort if we hit that case.
1892 *
1893 * LOCKING:
1894 * Kernel thread context (may sleep)
1895 *
1896 * RETURNS:
1897 * 0 on success, -errno otherwise.
1898 */
1899 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1900 unsigned int flags, u16 *id)
1901 {
1902 struct ata_port *ap = dev->link->ap;
1903 unsigned int class = *p_class;
1904 struct ata_taskfile tf;
1905 unsigned int err_mask = 0;
1906 const char *reason;
1907 int may_fallback = 1, tried_spinup = 0;
1908 int rc;
1909
1910 if (ata_msg_ctl(ap))
1911 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1912
1913 retry:
1914 ata_tf_init(dev, &tf);
1915
1916 switch (class) {
1917 case ATA_DEV_ATA:
1918 tf.command = ATA_CMD_ID_ATA;
1919 break;
1920 case ATA_DEV_ATAPI:
1921 tf.command = ATA_CMD_ID_ATAPI;
1922 break;
1923 default:
1924 rc = -ENODEV;
1925 reason = "unsupported class";
1926 goto err_out;
1927 }
1928
1929 tf.protocol = ATA_PROT_PIO;
1930
1931 /* Some devices choke if TF registers contain garbage. Make
1932 * sure those are properly initialized.
1933 */
1934 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1935
1936 /* Device presence detection is unreliable on some
1937 * controllers. Always poll IDENTIFY if available.
1938 */
1939 tf.flags |= ATA_TFLAG_POLLING;
1940
1941 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1942 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1943 if (err_mask) {
1944 if (err_mask & AC_ERR_NODEV_HINT) {
1945 ata_dev_printk(dev, KERN_DEBUG,
1946 "NODEV after polling detection\n");
1947 return -ENOENT;
1948 }
1949
1950 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1951 /* Device or controller might have reported
1952 * the wrong device class. Give a shot at the
1953 * other IDENTIFY if the current one is
1954 * aborted by the device.
1955 */
1956 if (may_fallback) {
1957 may_fallback = 0;
1958
1959 if (class == ATA_DEV_ATA)
1960 class = ATA_DEV_ATAPI;
1961 else
1962 class = ATA_DEV_ATA;
1963 goto retry;
1964 }
1965
1966 /* Control reaches here iff the device aborted
1967 * both flavors of IDENTIFYs which happens
1968 * sometimes with phantom devices.
1969 */
1970 ata_dev_printk(dev, KERN_DEBUG,
1971 "both IDENTIFYs aborted, assuming NODEV\n");
1972 return -ENOENT;
1973 }
1974
1975 rc = -EIO;
1976 reason = "I/O error";
1977 goto err_out;
1978 }
1979
1980 /* Falling back doesn't make sense if ID data was read
1981 * successfully at least once.
1982 */
1983 may_fallback = 0;
1984
1985 swap_buf_le16(id, ATA_ID_WORDS);
1986
1987 /* sanity check */
1988 rc = -EINVAL;
1989 reason = "device reports invalid type";
1990
1991 if (class == ATA_DEV_ATA) {
1992 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1993 goto err_out;
1994 } else {
1995 if (ata_id_is_ata(id))
1996 goto err_out;
1997 }
1998
1999 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2000 tried_spinup = 1;
2001 /*
2002 * Drive powered-up in standby mode, and requires a specific
2003 * SET_FEATURES spin-up subcommand before it will accept
2004 * anything other than the original IDENTIFY command.
2005 */
2006 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2007 if (err_mask && id[2] != 0x738c) {
2008 rc = -EIO;
2009 reason = "SPINUP failed";
2010 goto err_out;
2011 }
2012 /*
2013 * If the drive initially returned incomplete IDENTIFY info,
2014 * we now must reissue the IDENTIFY command.
2015 */
2016 if (id[2] == 0x37c8)
2017 goto retry;
2018 }
2019
2020 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2021 /*
2022 * The exact sequence expected by certain pre-ATA4 drives is:
2023 * SRST RESET
2024 * IDENTIFY (optional in early ATA)
2025 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2026 * anything else..
2027 * Some drives were very specific about that exact sequence.
2028 *
2029 		 * Note that ATA4 says LBA is mandatory so the second check
2030 		 * should never trigger.
2031 */
2032 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2033 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2034 if (err_mask) {
2035 rc = -EIO;
2036 reason = "INIT_DEV_PARAMS failed";
2037 goto err_out;
2038 }
2039
2040 /* current CHS translation info (id[53-58]) might be
2041 			 * changed. Reread the identify device info.
2042 */
2043 flags &= ~ATA_READID_POSTRESET;
2044 goto retry;
2045 }
2046 }
2047
2048 *p_class = class;
2049
2050 return 0;
2051
2052 err_out:
2053 if (ata_msg_warn(ap))
2054 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2055 "(%s, err_mask=0x%x)\n", reason, err_mask);
2056 return rc;
2057 }
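
/*
 * Editorial usage sketch (not in the original source), modelled on
 * ata_dev_reread_id() further below: callers pass in a class guess and
 * let ata_dev_read_id() correct it from the IDENTIFY reply.
 *
 *	unsigned int class = dev->class;
 *	u16 *id = (void *)dev->link->ap->sector_buf;
 *
 *	if (ata_dev_read_id(dev, &class, ATA_READID_POSTRESET, id) == 0)
 *		dev->class = class;	(class may have been corrected)
 */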
2058
2059 static inline u8 ata_dev_knobble(struct ata_device *dev)
2060 {
2061 struct ata_port *ap = dev->link->ap;
2062 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2063 }
2064
2065 static void ata_dev_config_ncq(struct ata_device *dev,
2066 char *desc, size_t desc_sz)
2067 {
2068 struct ata_port *ap = dev->link->ap;
2069 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2070
2071 if (!ata_id_has_ncq(dev->id)) {
2072 desc[0] = '\0';
2073 return;
2074 }
2075 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2076 snprintf(desc, desc_sz, "NCQ (not used)");
2077 return;
2078 }
2079 if (ap->flags & ATA_FLAG_NCQ) {
2080 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2081 dev->flags |= ATA_DFLAG_NCQ;
2082 }
2083
2084 if (hdepth >= ddepth)
2085 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2086 else
2087 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2088 }
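
/*
 * Editorial example (not in the original source): with can_queue = 31
 * on the host and a drive advertising a queue depth of 32, the buffer
 * reads "NCQ (depth 31/32)"; a drive with ATA_HORKAGE_NONCQ set yields
 * "NCQ (not used)".
 */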
2089
2090 /**
2091 * ata_dev_configure - Configure the specified ATA/ATAPI device
2092 * @dev: Target device to configure
2093 *
2094 * Configure @dev according to @dev->id. Generic and low-level
2095 * driver specific fixups are also applied.
2096 *
2097 * LOCKING:
2098 * Kernel thread context (may sleep)
2099 *
2100 * RETURNS:
2101 * 0 on success, -errno otherwise
2102 */
2103 int ata_dev_configure(struct ata_device *dev)
2104 {
2105 struct ata_port *ap = dev->link->ap;
2106 struct ata_eh_context *ehc = &dev->link->eh_context;
2107 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2108 const u16 *id = dev->id;
2109 unsigned long xfer_mask;
2110 char revbuf[7]; /* XYZ-99\0 */
2111 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2112 char modelbuf[ATA_ID_PROD_LEN+1];
2113 int rc;
2114
2115 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2116 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2117 __func__);
2118 return 0;
2119 }
2120
2121 if (ata_msg_probe(ap))
2122 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2123
2124 /* set horkage */
2125 dev->horkage |= ata_dev_blacklisted(dev);
2126 ata_force_horkage(dev);
2127
2128 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2129 ata_dev_printk(dev, KERN_INFO,
2130 "unsupported device, disabling\n");
2131 ata_dev_disable(dev);
2132 return 0;
2133 }
2134
2135 /* let ACPI work its magic */
2136 rc = ata_acpi_on_devcfg(dev);
2137 if (rc)
2138 return rc;
2139
2140 /* massage HPA, do it early as it might change IDENTIFY data */
2141 rc = ata_hpa_resize(dev);
2142 if (rc)
2143 return rc;
2144
2145 /* print device capabilities */
2146 if (ata_msg_probe(ap))
2147 ata_dev_printk(dev, KERN_DEBUG,
2148 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2149 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2150 __func__,
2151 id[49], id[82], id[83], id[84],
2152 id[85], id[86], id[87], id[88]);
2153
2154 /* initialize to-be-configured parameters */
2155 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2156 dev->max_sectors = 0;
2157 dev->cdb_len = 0;
2158 dev->n_sectors = 0;
2159 dev->cylinders = 0;
2160 dev->heads = 0;
2161 dev->sectors = 0;
2162
2163 /*
2164 * common ATA, ATAPI feature tests
2165 */
2166
2167 /* find max transfer mode; for printk only */
2168 xfer_mask = ata_id_xfermask(id);
2169
2170 if (ata_msg_probe(ap))
2171 ata_dump_id(id);
2172
2173 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2174 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2175 sizeof(fwrevbuf));
2176
2177 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2178 sizeof(modelbuf));
2179
2180 /* ATA-specific feature tests */
2181 if (dev->class == ATA_DEV_ATA) {
2182 if (ata_id_is_cfa(id)) {
2183 if (id[162] & 1) /* CPRM may make this media unusable */
2184 ata_dev_printk(dev, KERN_WARNING,
2185 "supports DRM functions and may "
2186 "not be fully accessable.\n");
2187 snprintf(revbuf, 7, "CFA");
2188 } else {
2189 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2190 /* Warn the user if the device has TPM extensions */
2191 if (ata_id_has_tpm(id))
2192 ata_dev_printk(dev, KERN_WARNING,
2193 "supports DRM functions and may "
2194 "not be fully accessable.\n");
2195 }
2196
2197 dev->n_sectors = ata_id_n_sectors(id);
2198
2199 if (dev->id[59] & 0x100)
2200 dev->multi_count = dev->id[59] & 0xff;
2201
2202 if (ata_id_has_lba(id)) {
2203 const char *lba_desc;
2204 char ncq_desc[20];
2205
2206 lba_desc = "LBA";
2207 dev->flags |= ATA_DFLAG_LBA;
2208 if (ata_id_has_lba48(id)) {
2209 dev->flags |= ATA_DFLAG_LBA48;
2210 lba_desc = "LBA48";
2211
2212 if (dev->n_sectors >= (1UL << 28) &&
2213 ata_id_has_flush_ext(id))
2214 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2215 }
2216
2217 /* config NCQ */
2218 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2219
2220 /* print device info to dmesg */
2221 if (ata_msg_drv(ap) && print_info) {
2222 ata_dev_printk(dev, KERN_INFO,
2223 "%s: %s, %s, max %s\n",
2224 revbuf, modelbuf, fwrevbuf,
2225 ata_mode_string(xfer_mask));
2226 ata_dev_printk(dev, KERN_INFO,
2227 "%Lu sectors, multi %u: %s %s\n",
2228 (unsigned long long)dev->n_sectors,
2229 dev->multi_count, lba_desc, ncq_desc);
2230 }
2231 } else {
2232 /* CHS */
2233
2234 /* Default translation */
2235 dev->cylinders = id[1];
2236 dev->heads = id[3];
2237 dev->sectors = id[6];
2238
2239 if (ata_id_current_chs_valid(id)) {
2240 /* Current CHS translation is valid. */
2241 dev->cylinders = id[54];
2242 dev->heads = id[55];
2243 dev->sectors = id[56];
2244 }
2245
2246 /* print device info to dmesg */
2247 if (ata_msg_drv(ap) && print_info) {
2248 ata_dev_printk(dev, KERN_INFO,
2249 "%s: %s, %s, max %s\n",
2250 revbuf, modelbuf, fwrevbuf,
2251 ata_mode_string(xfer_mask));
2252 ata_dev_printk(dev, KERN_INFO,
2253 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2254 (unsigned long long)dev->n_sectors,
2255 dev->multi_count, dev->cylinders,
2256 dev->heads, dev->sectors);
2257 }
2258 }
2259
2260 dev->cdb_len = 16;
2261 }
2262
2263 /* ATAPI-specific feature tests */
2264 else if (dev->class == ATA_DEV_ATAPI) {
2265 const char *cdb_intr_string = "";
2266 const char *atapi_an_string = "";
2267 const char *dma_dir_string = "";
2268 u32 sntf;
2269
2270 rc = atapi_cdb_len(id);
2271 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2272 if (ata_msg_warn(ap))
2273 ata_dev_printk(dev, KERN_WARNING,
2274 "unsupported CDB len\n");
2275 rc = -EINVAL;
2276 goto err_out_nosup;
2277 }
2278 dev->cdb_len = (unsigned int) rc;
2279
2280 		/* Enable ATAPI AN if both the host and device support
2281 		 * it. If PMP is attached, SNTF is required
2282 * to enable ATAPI AN to discern between PHY status
2283 * changed notifications and ATAPI ANs.
2284 */
2285 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2286 (!sata_pmp_attached(ap) ||
2287 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2288 unsigned int err_mask;
2289
2290 /* issue SET feature command to turn this on */
2291 err_mask = ata_dev_set_feature(dev,
2292 SETFEATURES_SATA_ENABLE, SATA_AN);
2293 if (err_mask)
2294 ata_dev_printk(dev, KERN_ERR,
2295 "failed to enable ATAPI AN "
2296 "(err_mask=0x%x)\n", err_mask);
2297 else {
2298 dev->flags |= ATA_DFLAG_AN;
2299 atapi_an_string = ", ATAPI AN";
2300 }
2301 }
2302
2303 if (ata_id_cdb_intr(dev->id)) {
2304 dev->flags |= ATA_DFLAG_CDB_INTR;
2305 cdb_intr_string = ", CDB intr";
2306 }
2307
2308 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2309 dev->flags |= ATA_DFLAG_DMADIR;
2310 dma_dir_string = ", DMADIR";
2311 }
2312
2313 /* print device info to dmesg */
2314 if (ata_msg_drv(ap) && print_info)
2315 ata_dev_printk(dev, KERN_INFO,
2316 "ATAPI: %s, %s, max %s%s%s%s\n",
2317 modelbuf, fwrevbuf,
2318 ata_mode_string(xfer_mask),
2319 cdb_intr_string, atapi_an_string,
2320 dma_dir_string);
2321 }
2322
2323 /* determine max_sectors */
2324 dev->max_sectors = ATA_MAX_SECTORS;
2325 if (dev->flags & ATA_DFLAG_LBA48)
2326 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2327
2328 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2329 if (ata_id_has_hipm(dev->id))
2330 dev->flags |= ATA_DFLAG_HIPM;
2331 if (ata_id_has_dipm(dev->id))
2332 dev->flags |= ATA_DFLAG_DIPM;
2333 }
2334
2335 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2336 200 sectors */
2337 if (ata_dev_knobble(dev)) {
2338 if (ata_msg_drv(ap) && print_info)
2339 ata_dev_printk(dev, KERN_INFO,
2340 "applying bridge limits\n");
2341 dev->udma_mask &= ATA_UDMA5;
2342 dev->max_sectors = ATA_MAX_SECTORS;
2343 }
2344
2345 if ((dev->class == ATA_DEV_ATAPI) &&
2346 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2347 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2348 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2349 }
2350
2351 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2352 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2353 dev->max_sectors);
2354
2355 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2356 dev->horkage |= ATA_HORKAGE_IPM;
2357
2358 /* reset link pm_policy for this port to no pm */
2359 ap->pm_policy = MAX_PERFORMANCE;
2360 }
2361
2362 if (ap->ops->dev_config)
2363 ap->ops->dev_config(dev);
2364
2365 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2366 /* Let the user know. We don't want to disallow opens for
2367 rescue purposes, or in case the vendor is just a blithering
2368 idiot. Do this after the dev_config call as some controllers
2369 with buggy firmware may want to avoid reporting false device
2370 bugs */
2371
2372 if (print_info) {
2373 ata_dev_printk(dev, KERN_WARNING,
2374 "Drive reports diagnostics failure. This may indicate a drive\n");
2375 ata_dev_printk(dev, KERN_WARNING,
2376 "fault or invalid emulation. Contact drive vendor for information.\n");
2377 }
2378 }
2379
2380 return 0;
2381
2382 err_out_nosup:
2383 if (ata_msg_probe(ap))
2384 ata_dev_printk(dev, KERN_DEBUG,
2385 "%s: EXIT, err\n", __func__);
2386 return rc;
2387 }
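
/*
 * Editorial sketch (not in the original source): the ->dev_config()
 * hook invoked above lets the LLD apply last-minute per-device limits
 * after the generic setup. A hypothetical controller capping transfers
 * at 128 sectors might use:
 *
 *	static void my_dev_config(struct ata_device *adev)
 *	{
 *		adev->max_sectors = min_t(unsigned int, adev->max_sectors,
 *					  ATA_MAX_SECTORS_128);
 *	}
 */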
2388
2389 /**
2390 * ata_cable_40wire - return 40 wire cable type
2391 * @ap: port
2392 *
2393 * Helper method for drivers which want to hardwire 40 wire cable
2394 * detection.
2395 */
2396
2397 int ata_cable_40wire(struct ata_port *ap)
2398 {
2399 return ATA_CBL_PATA40;
2400 }
2401
2402 /**
2403 * ata_cable_80wire - return 80 wire cable type
2404 * @ap: port
2405 *
2406 * Helper method for drivers which want to hardwire 80 wire cable
2407 * detection.
2408 */
2409
2410 int ata_cable_80wire(struct ata_port *ap)
2411 {
2412 return ATA_CBL_PATA80;
2413 }
2414
2415 /**
2416 * ata_cable_unknown - return unknown PATA cable.
2417 * @ap: port
2418 *
2419 * Helper method for drivers which have no PATA cable detection.
2420 */
2421
2422 int ata_cable_unknown(struct ata_port *ap)
2423 {
2424 return ATA_CBL_PATA_UNK;
2425 }
2426
2427 /**
2428 * ata_cable_ignore - return ignored PATA cable.
2429 * @ap: port
2430 *
2431 * Helper method for drivers which don't use cable type to limit
2432 * transfer mode.
2433 */
2434 int ata_cable_ignore(struct ata_port *ap)
2435 {
2436 return ATA_CBL_PATA_IGN;
2437 }
2438
2439 /**
2440 * ata_cable_sata - return SATA cable type
2441 * @ap: port
2442 *
2443 * Helper method for drivers which have SATA cables
2444 */
2445
2446 int ata_cable_sata(struct ata_port *ap)
2447 {
2448 return ATA_CBL_SATA;
2449 }
2450
2451 /**
2452 * ata_bus_probe - Reset and probe ATA bus
2453 * @ap: Bus to probe
2454 *
2455 * Master ATA bus probing function. Initiates a hardware-dependent
2456 * bus reset, then attempts to identify any devices found on
2457 * the bus.
2458 *
2459 * LOCKING:
2460 * PCI/etc. bus probe sem.
2461 *
2462 * RETURNS:
2463 * Zero on success, negative errno otherwise.
2464 */
2465
2466 int ata_bus_probe(struct ata_port *ap)
2467 {
2468 unsigned int classes[ATA_MAX_DEVICES];
2469 int tries[ATA_MAX_DEVICES];
2470 int rc;
2471 struct ata_device *dev;
2472
2473 ata_port_probe(ap);
2474
2475 ata_link_for_each_dev(dev, &ap->link)
2476 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2477
2478 retry:
2479 ata_link_for_each_dev(dev, &ap->link) {
2480 /* If we issue an SRST then an ATA drive (not ATAPI)
2481 * may change configuration and be in PIO0 timing. If
2482 * we do a hard reset (or are coming from power on)
2483 * this is true for ATA or ATAPI. Until we've set a
2484 * suitable controller mode we should not touch the
2485 * bus as we may be talking too fast.
2486 */
2487 dev->pio_mode = XFER_PIO_0;
2488
2489 /* If the controller has a pio mode setup function
2490 * then use it to set the chipset to rights. Don't
2491 * touch the DMA setup as that will be dealt with when
2492 * configuring devices.
2493 */
2494 if (ap->ops->set_piomode)
2495 ap->ops->set_piomode(ap, dev);
2496 }
2497
2498 /* reset and determine device classes */
2499 ap->ops->phy_reset(ap);
2500
2501 ata_link_for_each_dev(dev, &ap->link) {
2502 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2503 dev->class != ATA_DEV_UNKNOWN)
2504 classes[dev->devno] = dev->class;
2505 else
2506 classes[dev->devno] = ATA_DEV_NONE;
2507
2508 dev->class = ATA_DEV_UNKNOWN;
2509 }
2510
2511 ata_port_probe(ap);
2512
2513 	/* read IDENTIFY page and configure devices. We have to do the identify-
2514 	   specific sequence bass-ackwards so that PDIAG- is released by
2515 the slave device */
2516
2517 ata_link_for_each_dev_reverse(dev, &ap->link) {
2518 if (tries[dev->devno])
2519 dev->class = classes[dev->devno];
2520
2521 if (!ata_dev_enabled(dev))
2522 continue;
2523
2524 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2525 dev->id);
2526 if (rc)
2527 goto fail;
2528 }
2529
2530 /* Now ask for the cable type as PDIAG- should have been released */
2531 if (ap->ops->cable_detect)
2532 ap->cbl = ap->ops->cable_detect(ap);
2533
2534 /* We may have SATA bridge glue hiding here irrespective of the
2535 reported cable types and sensed types */
2536 ata_link_for_each_dev(dev, &ap->link) {
2537 if (!ata_dev_enabled(dev))
2538 continue;
2539 /* SATA drives indicate we have a bridge. We don't know which
2540 		   end of the link the bridge is, which is a problem */
2541 if (ata_id_is_sata(dev->id))
2542 ap->cbl = ATA_CBL_SATA;
2543 }
2544
2545 /* After the identify sequence we can now set up the devices. We do
2546 this in the normal order so that the user doesn't get confused */
2547
2548 ata_link_for_each_dev(dev, &ap->link) {
2549 if (!ata_dev_enabled(dev))
2550 continue;
2551
2552 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2553 rc = ata_dev_configure(dev);
2554 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2555 if (rc)
2556 goto fail;
2557 }
2558
2559 /* configure transfer mode */
2560 rc = ata_set_mode(&ap->link, &dev);
2561 if (rc)
2562 goto fail;
2563
2564 ata_link_for_each_dev(dev, &ap->link)
2565 if (ata_dev_enabled(dev))
2566 return 0;
2567
2568 /* no device present, disable port */
2569 ata_port_disable(ap);
2570 return -ENODEV;
2571
2572 fail:
2573 tries[dev->devno]--;
2574
2575 switch (rc) {
2576 case -EINVAL:
2577 /* eeek, something went very wrong, give up */
2578 tries[dev->devno] = 0;
2579 break;
2580
2581 case -ENODEV:
2582 /* give it just one more chance */
2583 tries[dev->devno] = min(tries[dev->devno], 1);
2584 case -EIO:
2585 if (tries[dev->devno] == 1) {
2586 /* This is the last chance, better to slow
2587 * down than lose it.
2588 */
2589 sata_down_spd_limit(&ap->link);
2590 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2591 }
2592 }
2593
2594 if (!tries[dev->devno])
2595 ata_dev_disable(dev);
2596
2597 goto retry;
2598 }
2599
2600 /**
2601 * ata_port_probe - Mark port as enabled
2602 * @ap: Port for which we indicate enablement
2603 *
2604 * Modify @ap data structure such that the system
2605 * thinks that the entire port is enabled.
2606 *
2607 * LOCKING: host lock, or some other form of
2608 * serialization.
2609 */
2610
2611 void ata_port_probe(struct ata_port *ap)
2612 {
2613 ap->flags &= ~ATA_FLAG_DISABLED;
2614 }
2615
2616 /**
2617 * sata_print_link_status - Print SATA link status
2618 * @link: SATA link to printk link status about
2619 *
2620 * This function prints link speed and status of a SATA link.
2621 *
2622 * LOCKING:
2623 * None.
2624 */
2625 static void sata_print_link_status(struct ata_link *link)
2626 {
2627 u32 sstatus, scontrol, tmp;
2628
2629 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2630 return;
2631 sata_scr_read(link, SCR_CONTROL, &scontrol);
2632
2633 if (ata_link_online(link)) {
2634 tmp = (sstatus >> 4) & 0xf;
2635 ata_link_printk(link, KERN_INFO,
2636 "SATA link up %s (SStatus %X SControl %X)\n",
2637 sata_spd_string(tmp), sstatus, scontrol);
2638 } else {
2639 ata_link_printk(link, KERN_INFO,
2640 "SATA link down (SStatus %X SControl %X)\n",
2641 sstatus, scontrol);
2642 }
2643 }
2644
2645 /**
2646 * ata_dev_pair - return other device on cable
2647 * @adev: device
2648 *
2649 * Obtain the other device on the same cable, or if none is
2650 * present NULL is returned
2651 */
2652
2653 struct ata_device *ata_dev_pair(struct ata_device *adev)
2654 {
2655 struct ata_link *link = adev->link;
2656 struct ata_device *pair = &link->device[1 - adev->devno];
2657 if (!ata_dev_enabled(pair))
2658 return NULL;
2659 return pair;
2660 }
2661
2662 /**
2663 * ata_port_disable - Disable port.
2664 * @ap: Port to be disabled.
2665 *
2666 * Modify @ap data structure such that the system
2667 * thinks that the entire port is disabled, and should
2668 * never attempt to probe or communicate with devices
2669 * on this port.
2670 *
2671 * LOCKING: host lock, or some other form of
2672 * serialization.
2673 */
2674
2675 void ata_port_disable(struct ata_port *ap)
2676 {
2677 ap->link.device[0].class = ATA_DEV_NONE;
2678 ap->link.device[1].class = ATA_DEV_NONE;
2679 ap->flags |= ATA_FLAG_DISABLED;
2680 }
2681
2682 /**
2683 * sata_down_spd_limit - adjust SATA spd limit downward
2684 * @link: Link to adjust SATA spd limit for
2685 *
2686 * Adjust SATA spd limit of @link downward. Note that this
2687 * function only adjusts the limit. The change must be applied
2688 * using sata_set_spd().
2689 *
2690 * LOCKING:
2691 * Inherited from caller.
2692 *
2693 * RETURNS:
2694 * 0 on success, negative errno on failure
2695 */
2696 int sata_down_spd_limit(struct ata_link *link)
2697 {
2698 u32 sstatus, spd, mask;
2699 int rc, highbit;
2700
2701 if (!sata_scr_valid(link))
2702 return -EOPNOTSUPP;
2703
2704 /* If SCR can be read, use it to determine the current SPD.
2705 * If not, use cached value in link->sata_spd.
2706 */
2707 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2708 if (rc == 0)
2709 spd = (sstatus >> 4) & 0xf;
2710 else
2711 spd = link->sata_spd;
2712
2713 mask = link->sata_spd_limit;
2714 if (mask <= 1)
2715 return -EINVAL;
2716
2717 /* unconditionally mask off the highest bit */
2718 highbit = fls(mask) - 1;
2719 mask &= ~(1 << highbit);
2720
2721 /* Mask off all speeds higher than or equal to the current
2722 * one. Force 1.5Gbps if current SPD is not available.
2723 */
2724 if (spd > 1)
2725 mask &= (1 << (spd - 1)) - 1;
2726 else
2727 mask &= 1;
2728
2729 /* were we already at the bottom? */
2730 if (!mask)
2731 return -EINVAL;
2732
2733 link->sata_spd_limit = mask;
2734
2735 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2736 sata_spd_string(fls(mask)));
2737
2738 return 0;
2739 }
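
/*
 * Editorial worked example (not in the original source): with
 * sata_spd_limit = 0x3 (1.5 and 3.0 Gbps allowed) and current spd = 2
 * (3.0 Gbps), the high bit is cleared (mask = 0x1) and speeds at or
 * above the current one are masked off, leaving 0x1: the link is
 * limited to 1.5 Gbps and the warning above reports exactly that.
 */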
2740
2741 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2742 {
2743 struct ata_link *host_link = &link->ap->link;
2744 u32 limit, target, spd;
2745
2746 limit = link->sata_spd_limit;
2747
2748 /* Don't configure downstream link faster than upstream link.
2749 * It doesn't speed up anything and some PMPs choke on such
2750 * configuration.
2751 */
2752 if (!ata_is_host_link(link) && host_link->sata_spd)
2753 limit &= (1 << host_link->sata_spd) - 1;
2754
2755 if (limit == UINT_MAX)
2756 target = 0;
2757 else
2758 target = fls(limit);
2759
2760 spd = (*scontrol >> 4) & 0xf;
2761 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2762
2763 return spd != target;
2764 }
2765
2766 /**
2767 * sata_set_spd_needed - is SATA spd configuration needed
2768 * @link: Link in question
2769 *
2770 * Test whether the spd limit in SControl matches
2771 * @link->sata_spd_limit. This function is used to determine
2772 * whether hardreset is necessary to apply SATA spd
2773 * configuration.
2774 *
2775 * LOCKING:
2776 * Inherited from caller.
2777 *
2778 * RETURNS:
2779 * 1 if SATA spd configuration is needed, 0 otherwise.
2780 */
2781 static int sata_set_spd_needed(struct ata_link *link)
2782 {
2783 u32 scontrol;
2784
2785 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2786 return 1;
2787
2788 return __sata_set_spd_needed(link, &scontrol);
2789 }
2790
2791 /**
2792 * sata_set_spd - set SATA spd according to spd limit
2793 * @link: Link to set SATA spd for
2794 *
2795 * Set SATA spd of @link according to sata_spd_limit.
2796 *
2797 * LOCKING:
2798 * Inherited from caller.
2799 *
2800 * RETURNS:
2801 * 0 if spd doesn't need to be changed, 1 if spd has been
2802 * changed. Negative errno if SCR registers are inaccessible.
2803 */
2804 int sata_set_spd(struct ata_link *link)
2805 {
2806 u32 scontrol;
2807 int rc;
2808
2809 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2810 return rc;
2811
2812 if (!__sata_set_spd_needed(link, &scontrol))
2813 return 0;
2814
2815 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2816 return rc;
2817
2818 return 1;
2819 }
2820
2821 /*
2822 * This mode timing computation functionality is ported over from
2823 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2824 */
2825 /*
2826 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2827 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2828 * for UDMA6, which is currently supported only by Maxtor drives.
2829 *
2830 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2831 */
2832
2833 static const struct ata_timing ata_timing[] = {
2834 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2835 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2836 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2837 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2838 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2839 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2840 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2841 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2842
2843 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2844 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2845 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2846
2847 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2848 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2849 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2850 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2851 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2852
2853 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2854 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2855 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2856 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2857 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2858 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2859 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2860 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2861
2862 { 0xFF }
2863 };
2864
2865 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2866 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2867
2868 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2869 {
2870 q->setup = EZ(t->setup * 1000, T);
2871 q->act8b = EZ(t->act8b * 1000, T);
2872 q->rec8b = EZ(t->rec8b * 1000, T);
2873 q->cyc8b = EZ(t->cyc8b * 1000, T);
2874 q->active = EZ(t->active * 1000, T);
2875 q->recover = EZ(t->recover * 1000, T);
2876 q->cycle = EZ(t->cycle * 1000, T);
2877 q->udma = EZ(t->udma * 1000, UT);
2878 }
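
/*
 * Editorial worked example (not in the original source): the ns-to-ps
 * scaling (* 1000) implies T and UT are clock periods in picoseconds.
 * On a 33 MHz bus clock (T = 30000), a 120 ns MWDMA2 cycle quantizes
 * to EZ(120 * 1000, 30000) = (120000 - 1) / 30000 + 1 = 4 clocks.
 */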
2879
2880 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2881 struct ata_timing *m, unsigned int what)
2882 {
2883 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2884 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2885 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2886 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2887 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2888 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2889 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2890 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2891 }
2892
2893 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2894 {
2895 const struct ata_timing *t = ata_timing;
2896
2897 while (xfer_mode > t->mode)
2898 t++;
2899
2900 if (xfer_mode == t->mode)
2901 return t;
2902 return NULL;
2903 }
2904
2905 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2906 struct ata_timing *t, int T, int UT)
2907 {
2908 const struct ata_timing *s;
2909 struct ata_timing p;
2910
2911 /*
2912 * Find the mode.
2913 */
2914
2915 if (!(s = ata_timing_find_mode(speed)))
2916 return -EINVAL;
2917
2918 memcpy(t, s, sizeof(*s));
2919
2920 /*
2921 * If the drive is an EIDE drive, it can tell us it needs extended
2922 * PIO/MW_DMA cycle timing.
2923 */
2924
2925 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2926 memset(&p, 0, sizeof(p));
2927 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2928 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2929 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2930 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2931 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2932 }
2933 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2934 }
2935
2936 /*
2937 * Convert the timing to bus clock counts.
2938 */
2939
2940 ata_timing_quantize(t, t, T, UT);
2941
2942 /*
2943 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2944 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2945 	 * DMA cycle timing is no faster than the fastest PIO timing.
2946 */
2947
2948 if (speed > XFER_PIO_6) {
2949 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2950 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2951 }
2952
2953 /*
2954 * Lengthen active & recovery time so that cycle time is correct.
2955 */
2956
2957 if (t->act8b + t->rec8b < t->cyc8b) {
2958 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2959 t->rec8b = t->cyc8b - t->act8b;
2960 }
2961
2962 if (t->active + t->recover < t->cycle) {
2963 t->active += (t->cycle - (t->active + t->recover)) / 2;
2964 t->recover = t->cycle - t->active;
2965 }
2966
2967 /* In a few cases quantisation may produce enough errors to
2968 	   leave t->cycle too low for the sum of active and recovery;
2969 	   if so, we must correct this */
2970 if (t->active + t->recover > t->cycle)
2971 t->cycle = t->active + t->recover;
2972
2973 return 0;
2974 }
2975
2976 /**
2977 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2978 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2979 * @cycle: cycle duration in ns
2980 *
2981 * Return matching xfer mode for @cycle. The returned mode is of
2982 * the transfer type specified by @xfer_shift. If @cycle is too
2983 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
2984  * than the fastest known mode, the fastest mode is returned.
2985 *
2986 * LOCKING:
2987 * None.
2988 *
2989 * RETURNS:
2990 * Matching xfer_mode, 0xff if no match found.
2991 */
2992 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
2993 {
2994 u8 base_mode = 0xff, last_mode = 0xff;
2995 const struct ata_xfer_ent *ent;
2996 const struct ata_timing *t;
2997
2998 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
2999 if (ent->shift == xfer_shift)
3000 base_mode = ent->base;
3001
3002 for (t = ata_timing_find_mode(base_mode);
3003 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3004 unsigned short this_cycle;
3005
3006 switch (xfer_shift) {
3007 case ATA_SHIFT_PIO:
3008 case ATA_SHIFT_MWDMA:
3009 this_cycle = t->cycle;
3010 break;
3011 case ATA_SHIFT_UDMA:
3012 this_cycle = t->udma;
3013 break;
3014 default:
3015 return 0xff;
3016 }
3017
3018 if (cycle > this_cycle)
3019 break;
3020
3021 last_mode = t->mode;
3022 }
3023
3024 return last_mode;
3025 }
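
/*
 * Editorial worked example (not in the original source):
 * ata_timing_cycle2mode(ATA_SHIFT_UDMA, 120) accepts UDMA0 (120 ns),
 * then breaks at UDMA1 (80 ns, faster than requested) and returns
 * XFER_UDMA_0. A 200 ns cycle is too slow even for UDMA0, so 0xff is
 * returned, matching the description above.
 */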
3026
3027 /**
3028 * ata_down_xfermask_limit - adjust dev xfer masks downward
3029 * @dev: Device to adjust xfer masks
3030 * @sel: ATA_DNXFER_* selector
3031 *
3032 * Adjust xfer masks of @dev downward. Note that this function
3033 * does not apply the change. Invoking ata_set_mode() afterwards
3034 * will apply the limit.
3035 *
3036 * LOCKING:
3037 * Inherited from caller.
3038 *
3039 * RETURNS:
3040 * 0 on success, negative errno on failure
3041 */
3042 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3043 {
3044 char buf[32];
3045 unsigned long orig_mask, xfer_mask;
3046 unsigned long pio_mask, mwdma_mask, udma_mask;
3047 int quiet, highbit;
3048
3049 quiet = !!(sel & ATA_DNXFER_QUIET);
3050 sel &= ~ATA_DNXFER_QUIET;
3051
3052 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3053 dev->mwdma_mask,
3054 dev->udma_mask);
3055 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3056
3057 switch (sel) {
3058 case ATA_DNXFER_PIO:
3059 highbit = fls(pio_mask) - 1;
3060 pio_mask &= ~(1 << highbit);
3061 break;
3062
3063 case ATA_DNXFER_DMA:
3064 if (udma_mask) {
3065 highbit = fls(udma_mask) - 1;
3066 udma_mask &= ~(1 << highbit);
3067 if (!udma_mask)
3068 return -ENOENT;
3069 } else if (mwdma_mask) {
3070 highbit = fls(mwdma_mask) - 1;
3071 mwdma_mask &= ~(1 << highbit);
3072 if (!mwdma_mask)
3073 return -ENOENT;
3074 }
3075 break;
3076
3077 case ATA_DNXFER_40C:
3078 udma_mask &= ATA_UDMA_MASK_40C;
3079 break;
3080
3081 case ATA_DNXFER_FORCE_PIO0:
3082 pio_mask &= 1;
3083 case ATA_DNXFER_FORCE_PIO:
3084 mwdma_mask = 0;
3085 udma_mask = 0;
3086 break;
3087
3088 default:
3089 BUG();
3090 }
3091
3092 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3093
3094 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3095 return -ENOENT;
3096
3097 if (!quiet) {
3098 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3099 snprintf(buf, sizeof(buf), "%s:%s",
3100 ata_mode_string(xfer_mask),
3101 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3102 else
3103 snprintf(buf, sizeof(buf), "%s",
3104 ata_mode_string(xfer_mask));
3105
3106 ata_dev_printk(dev, KERN_WARNING,
3107 "limiting speed to %s\n", buf);
3108 }
3109
3110 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3111 &dev->udma_mask);
3112
3113 return 0;
3114 }
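
/*
 * Editorial worked example (not in the original source): for a device
 * currently allowing up to UDMA5, ATA_DNXFER_DMA clears the top UDMA
 * bit (new maximum UDMA4), logging e.g. "limiting speed to
 * UDMA/66:PIO4"; ATA_DNXFER_FORCE_PIO instead zeroes both DMA masks.
 */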
3115
3116 static int ata_dev_set_mode(struct ata_device *dev)
3117 {
3118 struct ata_eh_context *ehc = &dev->link->eh_context;
3119 const char *dev_err_whine = "";
3120 int ign_dev_err = 0;
3121 unsigned int err_mask;
3122 int rc;
3123
3124 dev->flags &= ~ATA_DFLAG_PIO;
3125 if (dev->xfer_shift == ATA_SHIFT_PIO)
3126 dev->flags |= ATA_DFLAG_PIO;
3127
3128 err_mask = ata_dev_set_xfermode(dev);
3129
3130 if (err_mask & ~AC_ERR_DEV)
3131 goto fail;
3132
3133 /* revalidate */
3134 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3135 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3136 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3137 if (rc)
3138 return rc;
3139
3140 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3141 /* Old CFA may refuse this command, which is just fine */
3142 if (ata_id_is_cfa(dev->id))
3143 ign_dev_err = 1;
3144 		/* Catch several broken garbage emulations plus some
3145 		   pre-ATA devices */
3146 if (ata_id_major_version(dev->id) == 0 &&
3147 dev->pio_mode <= XFER_PIO_2)
3148 ign_dev_err = 1;
3149 /* Some very old devices and some bad newer ones fail
3150 any kind of SET_XFERMODE request but support PIO0-2
3151 timings and no IORDY */
3152 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3153 ign_dev_err = 1;
3154 }
3155 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3156 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3157 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3158 dev->dma_mode == XFER_MW_DMA_0 &&
3159 (dev->id[63] >> 8) & 1)
3160 ign_dev_err = 1;
3161
3162 /* if the device is actually configured correctly, ignore dev err */
3163 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3164 ign_dev_err = 1;
3165
3166 if (err_mask & AC_ERR_DEV) {
3167 if (!ign_dev_err)
3168 goto fail;
3169 else
3170 dev_err_whine = " (device error ignored)";
3171 }
3172
3173 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3174 dev->xfer_shift, (int)dev->xfer_mode);
3175
3176 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3177 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3178 dev_err_whine);
3179
3180 return 0;
3181
3182 fail:
3183 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3184 "(err_mask=0x%x)\n", err_mask);
3185 return -EIO;
3186 }
3187
3188 /**
3189 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3190 * @link: link on which timings will be programmed
3191 * @r_failed_dev: out parameter for failed device
3192 *
3193 * Standard implementation of the function used to tune and set
3194 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3195 * ata_dev_set_mode() fails, pointer to the failing device is
3196 * returned in @r_failed_dev.
3197 *
3198 * LOCKING:
3199 * PCI/etc. bus probe sem.
3200 *
3201 * RETURNS:
3202 * 0 on success, negative errno otherwise
3203 */
3204
3205 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3206 {
3207 struct ata_port *ap = link->ap;
3208 struct ata_device *dev;
3209 int rc = 0, used_dma = 0, found = 0;
3210
3211 /* step 1: calculate xfer_mask */
3212 ata_link_for_each_dev(dev, link) {
3213 unsigned long pio_mask, dma_mask;
3214 unsigned int mode_mask;
3215
3216 if (!ata_dev_enabled(dev))
3217 continue;
3218
3219 mode_mask = ATA_DMA_MASK_ATA;
3220 if (dev->class == ATA_DEV_ATAPI)
3221 mode_mask = ATA_DMA_MASK_ATAPI;
3222 else if (ata_id_is_cfa(dev->id))
3223 mode_mask = ATA_DMA_MASK_CFA;
3224
3225 ata_dev_xfermask(dev);
3226 ata_force_xfermask(dev);
3227
3228 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3230
3231 if (libata_dma_mask & mode_mask)
3232 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3233 else
3234 dma_mask = 0;
3235
3236 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3237 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3238
3239 found = 1;
3240 if (dev->dma_mode != 0xff)
3241 used_dma = 1;
3242 }
3243 if (!found)
3244 goto out;
3245
3246 /* step 2: always set host PIO timings */
3247 ata_link_for_each_dev(dev, link) {
3248 if (!ata_dev_enabled(dev))
3249 continue;
3250
3251 if (dev->pio_mode == 0xff) {
3252 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3253 rc = -EINVAL;
3254 goto out;
3255 }
3256
3257 dev->xfer_mode = dev->pio_mode;
3258 dev->xfer_shift = ATA_SHIFT_PIO;
3259 if (ap->ops->set_piomode)
3260 ap->ops->set_piomode(ap, dev);
3261 }
3262
3263 /* step 3: set host DMA timings */
3264 ata_link_for_each_dev(dev, link) {
3265 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3266 continue;
3267
3268 dev->xfer_mode = dev->dma_mode;
3269 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3270 if (ap->ops->set_dmamode)
3271 ap->ops->set_dmamode(ap, dev);
3272 }
3273
3274 /* step 4: update devices' xfer mode */
3275 ata_link_for_each_dev(dev, link) {
3276 /* don't update suspended devices' xfer mode */
3277 if (!ata_dev_enabled(dev))
3278 continue;
3279
3280 rc = ata_dev_set_mode(dev);
3281 if (rc)
3282 goto out;
3283 }
3284
3285 /* Record simplex status. If we selected DMA then the other
3286 * host channels are not permitted to do so.
3287 */
3288 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3289 ap->host->simplex_claimed = ap;
3290
3291 out:
3292 if (rc)
3293 *r_failed_dev = dev;
3294 return rc;
3295 }
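
/*
 * Editorial sketch (not in the original source): a minimal LLD
 * ->set_piomode() hook as driven by step 2 above, assuming a 33 MHz
 * bus clock (T = 30000 ps, UT unused for PIO) and a hypothetical
 * my_chip_write_timings() register helper:
 *
 *	static void my_set_piomode(struct ata_port *ap, struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *
 *		if (!ata_timing_compute(adev, adev->pio_mode, &t, 30000, 1000))
 *			my_chip_write_timings(ap, adev, &t);
 *	}
 */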
3296
3297 /**
3298 * ata_wait_ready - wait for link to become ready
3299 * @link: link to be waited on
3300 * @deadline: deadline jiffies for the operation
3301 * @check_ready: callback to check link readiness
3302 *
3303 * Wait for @link to become ready. @check_ready should return
3304 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3305 * link doesn't seem to be occupied, other errno for other error
3306 * conditions.
3307 *
3308 * Transient -ENODEV conditions are allowed for
3309 * ATA_TMOUT_FF_WAIT.
3310 *
3311 * LOCKING:
3312 * EH context.
3313 *
3314 * RETURNS:
3315  *	0 if @link is ready before @deadline; otherwise, -errno.
3316 */
3317 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3318 int (*check_ready)(struct ata_link *link))
3319 {
3320 unsigned long start = jiffies;
3321 unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3322 int warned = 0;
3323
3324 if (time_after(nodev_deadline, deadline))
3325 nodev_deadline = deadline;
3326
3327 while (1) {
3328 unsigned long now = jiffies;
3329 int ready, tmp;
3330
3331 ready = tmp = check_ready(link);
3332 if (ready > 0)
3333 return 0;
3334
3335 /* -ENODEV could be transient. Ignore -ENODEV if link
3336 * is online. Also, some SATA devices take a long
3337 * time to clear 0xff after reset. For example,
3338 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3339 * GoVault needs even more than that. Wait for
3340 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3341 *
3342 * Note that some PATA controllers (pata_ali) explode
3343 * if status register is read more than once when
3344 * there's no device attached.
3345 */
3346 if (ready == -ENODEV) {
3347 if (ata_link_online(link))
3348 ready = 0;
3349 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3350 !ata_link_offline(link) &&
3351 time_before(now, nodev_deadline))
3352 ready = 0;
3353 }
3354
3355 if (ready)
3356 return ready;
3357 if (time_after(now, deadline))
3358 return -EBUSY;
3359
3360 if (!warned && time_after(now, start + 5 * HZ) &&
3361 (deadline - now > 3 * HZ)) {
3362 ata_link_printk(link, KERN_WARNING,
3363 "link is slow to respond, please be patient "
3364 "(ready=%d)\n", tmp);
3365 warned = 1;
3366 }
3367
3368 msleep(50);
3369 }
3370 }
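
/*
 * Editorial sketch (not in the original source): a @check_ready
 * callback returns > 0 when ready, 0 when busy and -ENODEV when the
 * port looks unoccupied. A hypothetical status-register flavour, using
 * an assumed my_read_status() helper:
 *
 *	static int my_check_ready(struct ata_link *link)
 *	{
 *		u8 status = my_read_status(link->ap);
 *
 *		if (status == 0xff)
 *			return -ENODEV;
 *		return !(status & ATA_BUSY);
 *	}
 */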
3371
3372 /**
3373 * ata_wait_after_reset - wait for link to become ready after reset
3374 * @link: link to be waited on
3375 * @deadline: deadline jiffies for the operation
3376 * @check_ready: callback to check link readiness
3377 *
3378 * Wait for @link to become ready after reset.
3379 *
3380 * LOCKING:
3381 * EH context.
3382 *
3383 * RETURNS:
3384  *	0 if @link is ready before @deadline; otherwise, -errno.
3385 */
3386 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3387 int (*check_ready)(struct ata_link *link))
3388 {
3389 msleep(ATA_WAIT_AFTER_RESET);
3390
3391 return ata_wait_ready(link, deadline, check_ready);
3392 }
3393
3394 /**
3395 * sata_link_debounce - debounce SATA phy status
3396 * @link: ATA link to debounce SATA phy status for
3397  * @params: timing parameters { interval, duration, timeout } in msec
3398 * @deadline: deadline jiffies for the operation
3399 *
3400 * Make sure SStatus of @link reaches stable state, determined by
3401 * holding the same value where DET is not 1 for @duration polled
3402  * every @interval, before @timeout.  Timeout constrains the
3403  * beginning of the stable state.  Because DET gets stuck at 1 on
3404  * some controllers after hot unplugging, this function waits
3405  * until timeout and then returns 0 if DET is stable at 1.
3406 *
3407 * @timeout is further limited by @deadline. The sooner of the
3408 * two is used.
3409 *
3410 * LOCKING:
3411 * Kernel thread context (may sleep)
3412 *
3413 * RETURNS:
3414 * 0 on success, -errno on failure.
3415 */
3416 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3417 unsigned long deadline)
3418 {
3419 unsigned long interval = params[0];
3420 unsigned long duration = params[1];
3421 unsigned long last_jiffies, t;
3422 u32 last, cur;
3423 int rc;
3424
3425 t = ata_deadline(jiffies, params[2]);
3426 if (time_before(t, deadline))
3427 deadline = t;
3428
3429 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3430 return rc;
3431 cur &= 0xf;
3432
3433 last = cur;
3434 last_jiffies = jiffies;
3435
3436 while (1) {
3437 msleep(interval);
3438 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3439 return rc;
3440 cur &= 0xf;
3441
3442 /* DET stable? */
3443 if (cur == last) {
3444 if (cur == 1 && time_before(jiffies, deadline))
3445 continue;
3446 if (time_after(jiffies,
3447 ata_deadline(last_jiffies, duration)))
3448 return 0;
3449 continue;
3450 }
3451
3452 /* unstable, start over */
3453 last = cur;
3454 last_jiffies = jiffies;
3455
3456 /* Check deadline. If debouncing failed, return
3457 * -EPIPE to tell upper layer to lower link speed.
3458 */
3459 if (time_after(jiffies, deadline))
3460 return -EPIPE;
3461 }
3462 }
3463
3464 /**
3465 * sata_link_resume - resume SATA link
3466 * @link: ATA link to resume SATA
3467  * @params: timing parameters { interval, duration, timeout } in msec
3468 * @deadline: deadline jiffies for the operation
3469 *
3470 * Resume SATA phy @link and debounce it.
3471 *
3472 * LOCKING:
3473 * Kernel thread context (may sleep)
3474 *
3475 * RETURNS:
3476 * 0 on success, -errno on failure.
3477 */
3478 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3479 unsigned long deadline)
3480 {
3481 u32 scontrol, serror;
3482 int rc;
3483
3484 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3485 return rc;
3486
3487 scontrol = (scontrol & 0x0f0) | 0x300;
3488
3489 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3490 return rc;
3491
3492 /* Some PHYs react badly if SStatus is pounded immediately
3493 * after resuming. Delay 200ms before debouncing.
3494 */
3495 msleep(200);
3496
3497 if ((rc = sata_link_debounce(link, params, deadline)))
3498 return rc;
3499
3500 /* clear SError, some PHYs require this even for SRST to work */
3501 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3502 rc = sata_scr_write(link, SCR_ERROR, serror);
3503
3504 return rc != -EINVAL ? rc : 0;
3505 }
3506
3507 /**
3508 * ata_std_prereset - prepare for reset
3509 * @link: ATA link to be reset
3510 * @deadline: deadline jiffies for the operation
3511 *
3512 * @link is about to be reset. Initialize it. Failure from
3513 * prereset makes libata abort whole reset sequence and give up
3514 * that port, so prereset should be best-effort. It does its
3515 * best to prepare for reset sequence but if things go wrong, it
3516 * should just whine, not fail.
3517 *
3518 * LOCKING:
3519 * Kernel thread context (may sleep)
3520 *
3521 * RETURNS:
3522 * 0 on success, -errno otherwise.
3523 */
3524 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3525 {
3526 struct ata_port *ap = link->ap;
3527 struct ata_eh_context *ehc = &link->eh_context;
3528 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3529 int rc;
3530
3531 /* if we're about to do hardreset, nothing more to do */
3532 if (ehc->i.action & ATA_EH_HARDRESET)
3533 return 0;
3534
3535 /* if SATA, resume link */
3536 if (ap->flags & ATA_FLAG_SATA) {
3537 rc = sata_link_resume(link, timing, deadline);
3538 /* whine about phy resume failure but proceed */
3539 if (rc && rc != -EOPNOTSUPP)
3540 ata_link_printk(link, KERN_WARNING, "failed to resume "
3541 "link for reset (errno=%d)\n", rc);
3542 }
3543
3544 /* no point in trying softreset on offline link */
3545 if (ata_link_offline(link))
3546 ehc->i.action &= ~ATA_EH_SOFTRESET;
3547
3548 return 0;
3549 }
3550
3551 /**
3552 * sata_link_hardreset - reset link via SATA phy reset
3553 * @link: link to reset
3554  * @timing: timing parameters { interval, duration, timeout } in msec
3555 * @deadline: deadline jiffies for the operation
3556 * @online: optional out parameter indicating link onlineness
3557 * @check_ready: optional callback to check link readiness
3558 *
3559 * SATA phy-reset @link using DET bits of SControl register.
3560 * After hardreset, link readiness is waited upon using
3561  * ata_wait_ready() if @check_ready is specified.  LLDs may
3562  * omit @check_ready and do the waiting themselves after this
3563  * function returns.  Device classification is the LLD's
3564 * responsibility.
3565 *
3566 * *@online is set to one iff reset succeeded and @link is online
3567 * after reset.
3568 *
3569 * LOCKING:
3570 * Kernel thread context (may sleep)
3571 *
3572 * RETURNS:
3573 * 0 on success, -errno otherwise.
3574 */
3575 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3576 unsigned long deadline,
3577 bool *online, int (*check_ready)(struct ata_link *))
3578 {
3579 u32 scontrol;
3580 int rc;
3581
3582 DPRINTK("ENTER\n");
3583
3584 if (online)
3585 *online = false;
3586
3587 if (sata_set_spd_needed(link)) {
3588 /* SATA spec says nothing about how to reconfigure
3589 * spd. To be on the safe side, turn off phy during
3590 * reconfiguration. This works for at least ICH7 AHCI
3591 * and Sil3124.
3592 */
3593 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3594 goto out;
3595
3596 scontrol = (scontrol & 0x0f0) | 0x304;
3597
3598 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3599 goto out;
3600
3601 sata_set_spd(link);
3602 }
3603
3604 /* issue phy wake/reset */
3605 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3606 goto out;
3607
3608 scontrol = (scontrol & 0x0f0) | 0x301;
3609
3610 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3611 goto out;
3612
3613 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3614 * 10.4.2 says at least 1 ms.
3615 */
3616 msleep(1);
3617
3618 /* bring link back */
3619 rc = sata_link_resume(link, timing, deadline);
3620 if (rc)
3621 goto out;
3622 /* if link is offline nothing more to do */
3623 if (ata_link_offline(link))
3624 goto out;
3625
3626 /* Link is online. From this point, -ENODEV too is an error. */
3627 if (online)
3628 *online = true;
3629
3630 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3631 /* If PMP is supported, we have to do follow-up SRST.
3632 * Some PMPs don't send D2H Reg FIS after hardreset if
3633 * the first port is empty. Wait only for
3634 * ATA_TMOUT_PMP_SRST_WAIT.
3635 */
3636 if (check_ready) {
3637 unsigned long pmp_deadline;
3638
3639 pmp_deadline = ata_deadline(jiffies,
3640 ATA_TMOUT_PMP_SRST_WAIT);
3641 if (time_after(pmp_deadline, deadline))
3642 pmp_deadline = deadline;
3643 ata_wait_ready(link, pmp_deadline, check_ready);
3644 }
3645 rc = -EAGAIN;
3646 goto out;
3647 }
3648
3649 rc = 0;
3650 if (check_ready)
3651 rc = ata_wait_ready(link, deadline, check_ready);
3652 out:
3653 if (rc && rc != -EAGAIN) {
3654 /* online is set iff link is online && reset succeeded */
3655 if (online)
3656 *online = false;
3657 ata_link_printk(link, KERN_ERR,
3658 "COMRESET failed (errno=%d)\n", rc);
3659 }
3660 DPRINTK("EXIT, rc=%d\n", rc);
3661 return rc;
3662 }
3663
3664 /**
3665 * sata_std_hardreset - COMRESET w/o waiting or classification
3666 * @link: link to reset
3667 * @class: resulting class of attached device
3668 * @deadline: deadline jiffies for the operation
3669 *
3670 * Standard SATA COMRESET w/o waiting or classification.
3671 *
3672 * LOCKING:
3673 * Kernel thread context (may sleep)
3674 *
3675 * RETURNS:
3676 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3677 */
3678 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3679 unsigned long deadline)
3680 {
3681 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3682 bool online;
3683 int rc;
3684
3685 /* do hardreset */
3686 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3687 return online ? -EAGAIN : rc;
3688 }
3689
3690 /**
3691 * ata_std_postreset - standard postreset callback
3692 * @link: the target ata_link
3693 * @classes: classes of attached devices
3694 *
3695 * This function is invoked after a successful reset. Note that
3696 * the device might have been reset more than once using
3697 * different reset methods before postreset is invoked.
3698 *
3699 * LOCKING:
3700 * Kernel thread context (may sleep)
3701 */
3702 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3703 {
3704 u32 serror;
3705
3706 DPRINTK("ENTER\n");
3707
3708 /* reset complete, clear SError */
3709 if (!sata_scr_read(link, SCR_ERROR, &serror))
3710 sata_scr_write(link, SCR_ERROR, serror);
3711
3712 /* print link status */
3713 sata_print_link_status(link);
3714
3715 DPRINTK("EXIT\n");
3716 }
3717
3718 /**
3719 * ata_dev_same_device - Determine whether new ID matches configured device
3720 * @dev: device to compare against
3721 * @new_class: class of the new device
3722 * @new_id: IDENTIFY page of the new device
3723 *
3724 * Compare @new_class and @new_id against @dev and determine
3725 * whether @dev is the device indicated by @new_class and
3726 * @new_id.
3727 *
3728 * LOCKING:
3729 * None.
3730 *
3731 * RETURNS:
3732 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3733 */
3734 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3735 const u16 *new_id)
3736 {
3737 const u16 *old_id = dev->id;
3738 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3739 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3740
3741 if (dev->class != new_class) {
3742 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3743 dev->class, new_class);
3744 return 0;
3745 }
3746
3747 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3748 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3749 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3750 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3751
3752 if (strcmp(model[0], model[1])) {
3753 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3754 "'%s' != '%s'\n", model[0], model[1]);
3755 return 0;
3756 }
3757
3758 if (strcmp(serial[0], serial[1])) {
3759 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3760 "'%s' != '%s'\n", serial[0], serial[1]);
3761 return 0;
3762 }
3763
3764 return 1;
3765 }
3766
3767 /**
3768 * ata_dev_reread_id - Re-read IDENTIFY data
3769 * @dev: target ATA device
3770 * @readid_flags: read ID flags
3771 *
3772 * Re-read IDENTIFY page and make sure @dev is still attached to
3773 * the port.
3774 *
3775 * LOCKING:
3776 * Kernel thread context (may sleep)
3777 *
3778 * RETURNS:
3779 * 0 on success, negative errno otherwise
3780 */
3781 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3782 {
3783 unsigned int class = dev->class;
3784 u16 *id = (void *)dev->link->ap->sector_buf;
3785 int rc;
3786
3787 /* read ID data */
3788 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3789 if (rc)
3790 return rc;
3791
3792 /* is the device still there? */
3793 if (!ata_dev_same_device(dev, class, id))
3794 return -ENODEV;
3795
3796 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3797 return 0;
3798 }
3799
3800 /**
3801 * ata_dev_revalidate - Revalidate ATA device
3802 * @dev: device to revalidate
3803 * @new_class: new class code
3804 * @readid_flags: read ID flags
3805 *
3806 * Re-read IDENTIFY page, make sure @dev is still attached to the
3807 * port and reconfigure it according to the new IDENTIFY page.
3808 *
3809 * LOCKING:
3810 * Kernel thread context (may sleep)
3811 *
3812 * RETURNS:
3813 * 0 on success, negative errno otherwise
3814 */
3815 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3816 unsigned int readid_flags)
3817 {
3818 u64 n_sectors = dev->n_sectors;
3819 int rc;
3820
3821 if (!ata_dev_enabled(dev))
3822 return -ENODEV;
3823
3824 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3825 if (ata_class_enabled(new_class) &&
3826 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3827 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3828 dev->class, new_class);
3829 rc = -ENODEV;
3830 goto fail;
3831 }
3832
3833 /* re-read ID */
3834 rc = ata_dev_reread_id(dev, readid_flags);
3835 if (rc)
3836 goto fail;
3837
3838 /* configure device according to the new ID */
3839 rc = ata_dev_configure(dev);
3840 if (rc)
3841 goto fail;
3842
3843 /* verify n_sectors hasn't changed */
3844 if (dev->class == ATA_DEV_ATA && n_sectors &&
3845 dev->n_sectors != n_sectors) {
3846 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3847 "%llu != %llu\n",
3848 (unsigned long long)n_sectors,
3849 (unsigned long long)dev->n_sectors);
3850
3851 /* restore original n_sectors */
3852 dev->n_sectors = n_sectors;
3853
3854 rc = -ENODEV;
3855 goto fail;
3856 }
3857
3858 return 0;
3859
3860 fail:
3861 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3862 return rc;
3863 }
3864
3865 struct ata_blacklist_entry {
3866 const char *model_num;
3867 const char *model_rev;
3868 unsigned long horkage;
3869 };
3870
3871 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3872 /* Devices with DMA related problems under Linux */
3873 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3874 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3875 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3876 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3877 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3878 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3879 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3880 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3881 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3882 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3883 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3884 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3885 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3886 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3887 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3888 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3889 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3890 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3891 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3892 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3893 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3894 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3895 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3896 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3897 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3898 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3899 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3900 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3901 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
3902 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3903 /* Odd clown on sil3726/4726 PMPs */
3904 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
3905
3906 /* Weird ATAPI devices */
3907 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3908
3909 /* Devices we expect to fail diagnostics */
3910
3911 /* Devices where NCQ should be avoided */
3912 /* NCQ is slow */
3913 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3914 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3915 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3916 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3917 /* NCQ is broken */
3918 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3919 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3920 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
3921 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
3922
3923 /* Blacklist entries taken from Silicon Image 3124/3132
3924 Windows driver .inf file - also several Linux problem reports */
3925 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3926 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3927 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3928
3929 /* devices which puke on READ_NATIVE_MAX */
3930 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3931 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3932 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3933 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3934
3935 /* Devices which report 1 sector over size HPA */
3936 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3937 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
3938 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
3939
3940 /* Devices which get the IVB wrong */
3941 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3942 /* Maybe we should just blacklist TSSTcorp... */
3943 { "TSSTcorp CDDVDW SH-S202H", "SB00", ATA_HORKAGE_IVB, },
3944 { "TSSTcorp CDDVDW SH-S202H", "SB01", ATA_HORKAGE_IVB, },
3945 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
3946 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
3947 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
3948 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
3949
3950 /* End Marker */
3951 { }
3952 };
3953
3954 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
3955 {
3956 const char *p;
3957 int len;
3958
3959 /*
3960 * check for trailing wildcard: *\0
3961 */
3962 p = strchr(patt, wildchar);
3963 if (p && ((*(p + 1)) == 0))
3964 len = p - patt;
3965 else {
3966 len = strlen(name);
3967 if (!len) {
3968 if (!*patt)
3969 return 0;
3970 return -1;
3971 }
3972 }
3973
3974 return strncmp(patt, name, len);
3975 }
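/*
 * A minimal sketch of the matching semantics (illustrative, the calls
 * below are not in the driver itself): strn_pattern_cmp() implements
 * only the trailing-wildcard subset of globbing that the blacklist
 * above relies on.
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') == 0
 *	strn_pattern_cmp("ST380817AS", "ST380817AS", '*') == 0
 *
 * Without a trailing '*', the comparison runs over strlen(name)
 * bytes, so a pattern of which the name is a prefix also matches;
 * the entries in ata_device_blacklist are written with this
 * behavior in mind.
 */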
3976
3977 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3978 {
3979 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3980 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3981 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3982
3983 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3984 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3985
3986 while (ad->model_num) {
3987 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
3988 if (ad->model_rev == NULL)
3989 return ad->horkage;
3990 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
3991 return ad->horkage;
3992 }
3993 ad++;
3994 }
3995 return 0;
3996 }
3997
3998 static int ata_dma_blacklisted(const struct ata_device *dev)
3999 {
4000 /* We don't support polling DMA.
4001 * Blacklist DMA (and fall back to PIO) for those ATAPI devices with
4002 * CDB-intr if the LLDD handles interrupts only in the HSM_ST_LAST state.
4003 */
4004 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4005 (dev->flags & ATA_DFLAG_CDB_INTR))
4006 return 1;
4007 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4008 }
4009
4010 /**
4011 * ata_is_40wire - check drive side detection
4012 * @dev: device
4013 *
4014 * Perform drive side detection decoding, allowing for device vendors
4015 * who can't follow the documentation.
4016 */
4017
4018 static int ata_is_40wire(struct ata_device *dev)
4019 {
4020 if (dev->horkage & ATA_HORKAGE_IVB)
4021 return ata_drive_40wire_relaxed(dev->id);
4022 return ata_drive_40wire(dev->id);
4023 }
4024
4025 /**
4026 * cable_is_40wire - 40/80/SATA decider
4027 * @ap: port to consider
4028 *
4029 * This function encapsulates the policy for speed management
4030 * in one place. At the moment we don't cache the result but
4031 * there is a good case for setting ap->cbl to the result when
4032 * we are called with unknown cables (and figuring out if it
4033 * impacts hotplug at all).
4034 *
4035 * Return 1 if the cable appears to be 40 wire.
4036 */
4037
4038 static int cable_is_40wire(struct ata_port *ap)
4039 {
4040 struct ata_link *link;
4041 struct ata_device *dev;
4042
4043 /* If the controller thinks we are 40 wire, we are */
4044 if (ap->cbl == ATA_CBL_PATA40)
4045 return 1;
4046 /* If the controller thinks we are 80 wire, we are */
4047 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4048 return 0;
4049 /* If the system is known to be 40 wire short cable (e.g. a laptop),
4050 then we allow 80 wire modes even if the drive isn't sure */
4051 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4052 return 0;
4053 /* If the controller doesn't know, we scan.
4054 *
4055 * Note: we look for all 40 wire detects at this point.
4056 * Any 80 wire detect is taken to mean an 80 wire cable
4057 * because
4058 * - in many setups only the one drive (the slave, if present)
4059 * will give a valid detect
4060 * - if you have a non-detect-capable drive you don't
4061 * want it to colour the choice
4062 */
4063 ata_port_for_each_link(link, ap) {
4064 ata_link_for_each_dev(dev, link) {
4065 if (!ata_is_40wire(dev))
4066 return 0;
4067 }
4068 }
4069 return 1;
4070 }
4071
4072 /**
4073 * ata_dev_xfermask - Compute supported xfermask of the given device
4074 * @dev: Device to compute xfermask for
4075 *
4076 * Compute supported xfermask of @dev and store it in
4077 * dev->*_mask. This function is responsible for applying all
4078 * known limits including host controller limits, device
4079 * blacklist, etc...
4080 *
4081 * LOCKING:
4082 * None.
4083 */
4084 static void ata_dev_xfermask(struct ata_device *dev)
4085 {
4086 struct ata_link *link = dev->link;
4087 struct ata_port *ap = link->ap;
4088 struct ata_host *host = ap->host;
4089 unsigned long xfer_mask;
4090
4091 /* controller modes available */
4092 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4093 ap->mwdma_mask, ap->udma_mask);
4094
4095 /* drive modes available */
4096 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4097 dev->mwdma_mask, dev->udma_mask);
4098 xfer_mask &= ata_id_xfermask(dev->id);
4099
4100 /*
4101 * CFA Advanced TrueIDE timings are not allowed on a shared
4102 * cable
4103 */
4104 if (ata_dev_pair(dev)) {
4105 /* No PIO5 or PIO6 */
4106 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4107 /* No MWDMA3 or MWDMA4 */
4108 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4109 }
4110
4111 if (ata_dma_blacklisted(dev)) {
4112 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4113 ata_dev_printk(dev, KERN_WARNING,
4114 "device is on DMA blacklist, disabling DMA\n");
4115 }
4116
4117 if ((host->flags & ATA_HOST_SIMPLEX) &&
4118 host->simplex_claimed && host->simplex_claimed != ap) {
4119 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4120 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4121 "other device, disabling DMA\n");
4122 }
4123
4124 if (ap->flags & ATA_FLAG_NO_IORDY)
4125 xfer_mask &= ata_pio_mask_no_iordy(dev);
4126
4127 if (ap->ops->mode_filter)
4128 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4129
4130 /* Apply cable rule here. Don't apply it early because when
4131 * we handle hot plug the cable type can itself change.
4132 * Check this last so that we know if the transfer rate was
4133 * solely limited by the cable.
4134 * Unknown or 80 wire cables reported host side are checked
4135 * drive side as well. Cases where we know a 40wire cable
4136 * is used safely for 80 are not checked here.
4137 */
4138 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4139 /* UDMA/44 or higher would be available */
4140 if (cable_is_40wire(ap)) {
4141 ata_dev_printk(dev, KERN_WARNING,
4142 "limited to UDMA/33 due to 40-wire cable\n");
4143 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4144 }
4145
4146 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4147 &dev->mwdma_mask, &dev->udma_mask);
4148 }
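/*
 * Illustrative example (assumes the ATA_PIO4/ATA_MWDMA2/ATA_UDMA5
 * convenience masks from <linux/ata.h>, i.e. 0x1f, 0x07 and 0x3f):
 * ata_pack_xfermask() folds the three per-type masks into a single
 * value using the ATA_SHIFT_* offsets, so a controller doing PIO0-4,
 * MWDMA0-2 and UDMA/100 could be described as
 *
 *	unsigned long xfer_mask = ata_pack_xfermask(ATA_PIO4,
 *						    ATA_MWDMA2, ATA_UDMA5);
 *
 * ata_unpack_xfermask() above performs the inverse split back into
 * dev->pio_mask, dev->mwdma_mask and dev->udma_mask.
 */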
4149
4150 /**
4151 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4152 * @dev: Device to which command will be sent
4153 *
4154 * Issue SET FEATURES - XFER MODE command to device @dev
4155 * on its port.
4156 *
4157 * LOCKING:
4158 * PCI/etc. bus probe sem.
4159 *
4160 * RETURNS:
4161 * 0 on success, AC_ERR_* mask otherwise.
4162 */
4163
4164 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4165 {
4166 struct ata_taskfile tf;
4167 unsigned int err_mask;
4168
4169 /* set up set-features taskfile */
4170 DPRINTK("set features - xfer mode\n");
4171
4172 /* Some controllers and ATAPI devices show flaky interrupt
4173 * behavior after setting xfer mode. Use polling instead.
4174 */
4175 ata_tf_init(dev, &tf);
4176 tf.command = ATA_CMD_SET_FEATURES;
4177 tf.feature = SETFEATURES_XFER;
4178 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4179 tf.protocol = ATA_PROT_NODATA;
4180 /* If we are using IORDY we must send the mode setting command */
4181 if (ata_pio_need_iordy(dev))
4182 tf.nsect = dev->xfer_mode;
4183 /* If the device has IORDY and the controller does not - turn it off */
4184 else if (ata_id_has_iordy(dev->id))
4185 tf.nsect = 0x01;
4186 else /* In the ancient relic department - skip all of this */
4187 return 0;
4188
4189 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4190
4191 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4192 return err_mask;
4193 }

4194 /**
4195 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4196 * @dev: Device to which command will be sent
4197 * @enable: Whether to enable or disable the feature
4198 * @feature: The feature to set, passed in the sector count field
4199 *
4200 * Issue SET FEATURES - SATA FEATURES command to device @dev
4201 * on its port, with @feature in the sector count field.
4202 *
4203 * LOCKING:
4204 * PCI/etc. bus probe sem.
4205 *
4206 * RETURNS:
4207 * 0 on success, AC_ERR_* mask otherwise.
4208 */
4209 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4210 u8 feature)
4211 {
4212 struct ata_taskfile tf;
4213 unsigned int err_mask;
4214
4215 /* set up set-features taskfile */
4216 DPRINTK("set features - SATA features\n");
4217
4218 ata_tf_init(dev, &tf);
4219 tf.command = ATA_CMD_SET_FEATURES;
4220 tf.feature = enable;
4221 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4222 tf.protocol = ATA_PROT_NODATA;
4223 tf.nsect = feature;
4224
4225 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4226
4227 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4228 return err_mask;
4229 }
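/*
 * Illustrative sketch: a typical caller passes the SETFEATURES
 * subcommand in @enable and the feature number in @feature.  For
 * instance, libata enables SATA asynchronous notification roughly
 * like this:
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_AN);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR,
 *			"failed to enable AN (err_mask=0x%x)\n", err_mask);
 */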
4230
4231 /**
4232 * ata_dev_init_params - Issue INIT DEV PARAMS command
4233 * @dev: Device to which command will be sent
4234 * @heads: Number of heads (taskfile parameter)
4235 * @sectors: Number of sectors (taskfile parameter)
4236 *
4237 * LOCKING:
4238 * Kernel thread context (may sleep)
4239 *
4240 * RETURNS:
4241 * 0 on success, AC_ERR_* mask otherwise.
4242 */
4243 static unsigned int ata_dev_init_params(struct ata_device *dev,
4244 u16 heads, u16 sectors)
4245 {
4246 struct ata_taskfile tf;
4247 unsigned int err_mask;
4248
4249 /* Number of sectors per track 1-255. Number of heads 1-16 */
4250 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4251 return AC_ERR_INVALID;
4252
4253 /* set up init dev params taskfile */
4254 DPRINTK("init dev params \n");
4255
4256 ata_tf_init(dev, &tf);
4257 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4258 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4259 tf.protocol = ATA_PROT_NODATA;
4260 tf.nsect = sectors;
4261 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4262
4263 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4264 /* A clean abort indicates an original or just-out-of-spec drive;
4265 we should continue, as we issue the setup based on the
4266 drive's reported working geometry */
4267 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4268 err_mask = 0;
4269
4270 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4271 return err_mask;
4272 }
4273
4274 /**
4275 * ata_sg_clean - Unmap DMA memory associated with command
4276 * @qc: Command containing DMA memory to be released
4277 *
4278 * Unmap all mapped DMA memory associated with this command.
4279 *
4280 * LOCKING:
4281 * spin_lock_irqsave(host lock)
4282 */
4283 void ata_sg_clean(struct ata_queued_cmd *qc)
4284 {
4285 struct ata_port *ap = qc->ap;
4286 struct scatterlist *sg = qc->sg;
4287 int dir = qc->dma_dir;
4288
4289 WARN_ON(sg == NULL);
4290
4291 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4292
4293 if (qc->n_elem)
4294 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4295
4296 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4297 qc->sg = NULL;
4298 }
4299
4300 /**
4301 * atapi_check_dma - Check whether ATAPI DMA can be supported
4302 * @qc: Metadata associated with taskfile to check
4303 *
4304 * Allow low-level driver to filter ATA PACKET commands, returning
4305 * a status indicating whether or not it is OK to use DMA for the
4306 * supplied PACKET command.
4307 *
4308 * LOCKING:
4309 * spin_lock_irqsave(host lock)
4310 *
4311 * RETURNS: 0 when ATAPI DMA can be used
4312 * nonzero otherwise
4313 */
4314 int atapi_check_dma(struct ata_queued_cmd *qc)
4315 {
4316 struct ata_port *ap = qc->ap;
4317
4318 /* Don't allow DMA if the transfer isn't a multiple of 16 bytes.
4319 * Quite a few ATAPI devices choke on such DMA requests.
4320 */
4321 if (unlikely(qc->nbytes & 15))
4322 return 1;
4323
4324 if (ap->ops->check_atapi_dma)
4325 return ap->ops->check_atapi_dma(qc);
4326
4327 return 0;
4328 }
4329
4330 /**
4331 * ata_std_qc_defer - Check whether a qc needs to be deferred
4332 * @qc: ATA command in question
4333 *
4334 * Non-NCQ commands cannot run with any other command, NCQ or
4335 * not. As the upper layer only knows the queue depth, we are
4336 * responsible for maintaining exclusion. This function checks
4337 * whether a new command @qc can be issued.
4338 *
4339 * LOCKING:
4340 * spin_lock_irqsave(host lock)
4341 *
4342 * RETURNS:
4343 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4344 */
4345 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4346 {
4347 struct ata_link *link = qc->dev->link;
4348
4349 if (qc->tf.protocol == ATA_PROT_NCQ) {
4350 if (!ata_tag_valid(link->active_tag))
4351 return 0;
4352 } else {
4353 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4354 return 0;
4355 }
4356
4357 return ATA_DEFER_LINK;
4358 }
4359
4360 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4361
4362 /**
4363 * ata_sg_init - Associate command with scatter-gather table.
4364 * @qc: Command to be associated
4365 * @sg: Scatter-gather table.
4366 * @n_elem: Number of elements in s/g table.
4367 *
4368 * Initialize the data-related elements of queued_cmd @qc
4369 * to point to a scatter-gather table @sg, containing @n_elem
4370 * elements.
4371 *
4372 * LOCKING:
4373 * spin_lock_irqsave(host lock)
4374 */
4375 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4376 unsigned int n_elem)
4377 {
4378 qc->sg = sg;
4379 qc->n_elem = n_elem;
4380 qc->cursg = qc->sg;
4381 }
4382
4383 /**
4384 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4385 * @qc: Command with scatter-gather table to be mapped.
4386 *
4387 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4388 *
4389 * LOCKING:
4390 * spin_lock_irqsave(host lock)
4391 *
4392 * RETURNS:
4393 * Zero on success, negative on error.
4394 *
4395 */
4396 static int ata_sg_setup(struct ata_queued_cmd *qc)
4397 {
4398 struct ata_port *ap = qc->ap;
4399 unsigned int n_elem;
4400
4401 VPRINTK("ENTER, ata%u\n", ap->print_id);
4402
4403 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4404 if (n_elem < 1)
4405 return -1;
4406
4407 DPRINTK("%d sg elements mapped\n", n_elem);
4408
4409 qc->n_elem = n_elem;
4410 qc->flags |= ATA_QCFLAG_DMAMAP;
4411
4412 return 0;
4413 }
4414
4415 /**
4416 * swap_buf_le16 - swap halves of 16-bit words in place
4417 * @buf: Buffer to swap
4418 * @buf_words: Number of 16-bit words in buffer.
4419 *
4420 * Swap halves of 16-bit words if needed to convert from
4421 * little-endian byte order to native cpu byte order, or
4422 * vice-versa.
4423 *
4424 * LOCKING:
4425 * Inherited from caller.
4426 */
4427 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4428 {
4429 #ifdef __BIG_ENDIAN
4430 unsigned int i;
4431
4432 for (i = 0; i < buf_words; i++)
4433 buf[i] = le16_to_cpu(buf[i]);
4434 #endif /* __BIG_ENDIAN */
4435 }
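/*
 * Illustrative example: the 256-word IDENTIFY DEVICE data is defined
 * as little-endian, so on big-endian hosts it is byte-swapped before
 * the ata_id_*() accessors are used:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * On little-endian builds the function compiles down to a no-op.
 */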
4436
4437 /**
4438 * ata_qc_new - Request an available ATA command, for queueing
4439 * @ap: Port to obtain the command structure for
4441 *
4442 * LOCKING:
4443 * None.
4444 */
4445
4446 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4447 {
4448 struct ata_queued_cmd *qc = NULL;
4449 unsigned int i;
4450
4451 /* no command while frozen */
4452 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4453 return NULL;
4454
4455 /* the last tag is reserved for internal command. */
4456 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4457 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4458 qc = __ata_qc_from_tag(ap, i);
4459 break;
4460 }
4461
4462 if (qc)
4463 qc->tag = i;
4464
4465 return qc;
4466 }
4467
4468 /**
4469 * ata_qc_new_init - Request an available ATA command, and initialize it
4470 * @dev: Device from which we request an available command structure
4471 *
4472 * LOCKING:
4473 * None.
4474 */
4475
4476 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4477 {
4478 struct ata_port *ap = dev->link->ap;
4479 struct ata_queued_cmd *qc;
4480
4481 qc = ata_qc_new(ap);
4482 if (qc) {
4483 qc->scsicmd = NULL;
4484 qc->ap = ap;
4485 qc->dev = dev;
4486
4487 ata_qc_reinit(qc);
4488 }
4489
4490 return qc;
4491 }
4492
4493 /**
4494 * ata_qc_free - free unused ata_queued_cmd
4495 * @qc: Command to free
4496 *
4497 * Free an unused ata_queued_cmd object
4498 * when something prevents it from being used.
4499 *
4500 * LOCKING:
4501 * spin_lock_irqsave(host lock)
4502 */
4503 void ata_qc_free(struct ata_queued_cmd *qc)
4504 {
4505 struct ata_port *ap = qc->ap;
4506 unsigned int tag;
4507
4508 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4509
4510 qc->flags = 0;
4511 tag = qc->tag;
4512 if (likely(ata_tag_valid(tag))) {
4513 qc->tag = ATA_TAG_POISON;
4514 clear_bit(tag, &ap->qc_allocated);
4515 }
4516 }
4517
4518 void __ata_qc_complete(struct ata_queued_cmd *qc)
4519 {
4520 struct ata_port *ap = qc->ap;
4521 struct ata_link *link = qc->dev->link;
4522
4523 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4524 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4525
4526 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4527 ata_sg_clean(qc);
4528
4529 /* command should be marked inactive atomically with qc completion */
4530 if (qc->tf.protocol == ATA_PROT_NCQ) {
4531 link->sactive &= ~(1 << qc->tag);
4532 if (!link->sactive)
4533 ap->nr_active_links--;
4534 } else {
4535 link->active_tag = ATA_TAG_POISON;
4536 ap->nr_active_links--;
4537 }
4538
4539 /* clear exclusive status */
4540 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4541 ap->excl_link == link))
4542 ap->excl_link = NULL;
4543
4544 /* atapi: mark qc as inactive to prevent the interrupt handler
4545 * from completing the command twice later, before the error handler
4546 * is called. (when rc != 0 and atapi request sense is needed)
4547 */
4548 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4549 ap->qc_active &= ~(1 << qc->tag);
4550
4551 /* call completion callback */
4552 qc->complete_fn(qc);
4553 }
4554
4555 static void fill_result_tf(struct ata_queued_cmd *qc)
4556 {
4557 struct ata_port *ap = qc->ap;
4558
4559 qc->result_tf.flags = qc->tf.flags;
4560 ap->ops->qc_fill_rtf(qc);
4561 }
4562
4563 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4564 {
4565 struct ata_device *dev = qc->dev;
4566
4567 if (ata_tag_internal(qc->tag))
4568 return;
4569
4570 if (ata_is_nodata(qc->tf.protocol))
4571 return;
4572
4573 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4574 return;
4575
4576 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4577 }
4578
4579 /**
4580 * ata_qc_complete - Complete an active ATA command
4581 * @qc: Command to complete
4583 *
4584 * Indicate to the mid and upper layers that an ATA
4585 * command has completed, with either an ok or not-ok status.
4586 *
4587 * LOCKING:
4588 * spin_lock_irqsave(host lock)
4589 */
4590 void ata_qc_complete(struct ata_queued_cmd *qc)
4591 {
4592 struct ata_port *ap = qc->ap;
4593
4594 /* XXX: New EH and old EH use different mechanisms to
4595 * synchronize EH with regular execution path.
4596 *
4597 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4598 * Normal execution path is responsible for not accessing a
4599 * failed qc. libata core enforces the rule by returning NULL
4600 * from ata_qc_from_tag() for failed qcs.
4601 *
4602 * Old EH depends on ata_qc_complete() nullifying completion
4603 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4604 * not synchronize with interrupt handler. Only PIO task is
4605 * taken care of.
4606 */
4607 if (ap->ops->error_handler) {
4608 struct ata_device *dev = qc->dev;
4609 struct ata_eh_info *ehi = &dev->link->eh_info;
4610
4611 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4612
4613 if (unlikely(qc->err_mask))
4614 qc->flags |= ATA_QCFLAG_FAILED;
4615
4616 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4617 if (!ata_tag_internal(qc->tag)) {
4618 /* always fill result TF for failed qc */
4619 fill_result_tf(qc);
4620 ata_qc_schedule_eh(qc);
4621 return;
4622 }
4623 }
4624
4625 /* read result TF if requested */
4626 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4627 fill_result_tf(qc);
4628
4629 /* Some commands need post-processing after successful
4630 * completion.
4631 */
4632 switch (qc->tf.command) {
4633 case ATA_CMD_SET_FEATURES:
4634 if (qc->tf.feature != SETFEATURES_WC_ON &&
4635 qc->tf.feature != SETFEATURES_WC_OFF)
4636 break;
4637 /* fall through */
4638 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4639 case ATA_CMD_SET_MULTI: /* multi_count changed */
4640 /* revalidate device */
4641 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4642 ata_port_schedule_eh(ap);
4643 break;
4644
4645 case ATA_CMD_SLEEP:
4646 dev->flags |= ATA_DFLAG_SLEEPING;
4647 break;
4648 }
4649
4650 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4651 ata_verify_xfer(qc);
4652
4653 __ata_qc_complete(qc);
4654 } else {
4655 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4656 return;
4657
4658 /* read result TF if failed or requested */
4659 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4660 fill_result_tf(qc);
4661
4662 __ata_qc_complete(qc);
4663 }
4664 }
4665
4666 /**
4667 * ata_qc_complete_multiple - Complete multiple qcs successfully
4668 * @ap: port in question
4669 * @qc_active: new qc_active mask
4670 *
4671 * Complete in-flight commands. This function is meant to be
4672 * called from the low-level driver's interrupt routine to complete
4673 * requests normally. ap->qc_active and @qc_active are compared
4674 * and commands are completed accordingly.
4675 *
4676 * LOCKING:
4677 * spin_lock_irqsave(host lock)
4678 *
4679 * RETURNS:
4680 * Number of completed commands on success, -errno otherwise.
4681 */
4682 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4683 {
4684 int nr_done = 0;
4685 u32 done_mask;
4686 int i;
4687
4688 done_mask = ap->qc_active ^ qc_active;
4689
4690 if (unlikely(done_mask & qc_active)) {
4691 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4692 "(%08x->%08x)\n", ap->qc_active, qc_active);
4693 return -EINVAL;
4694 }
4695
4696 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4697 struct ata_queued_cmd *qc;
4698
4699 if (!(done_mask & (1 << i)))
4700 continue;
4701
4702 if ((qc = ata_qc_from_tag(ap, i))) {
4703 ata_qc_complete(qc);
4704 nr_done++;
4705 }
4706 }
4707
4708 return nr_done;
4709 }
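/*
 * Illustrative sketch (hypothetical LLD, not part of this file): an
 * NCQ-capable driver's interrupt handler typically reads the set of
 * still-active command slots from a controller register and lets
 * ata_qc_complete_multiple() finish whatever has completed:
 *
 *	u32 qc_active = readl(port_mmio + PORT_SLOT_STAT); // hypothetical register
 *	int rc = ata_qc_complete_multiple(ap, qc_active);
 *
 *	if (rc < 0)
 *		ata_port_freeze(ap);	// illegal transition, punt to EH
 */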
4710
4711 /**
4712 * ata_qc_issue - issue taskfile to device
4713 * @qc: command to issue to device
4714 *
4715 * Prepare an ATA command for submission to the device.
4716 * This includes mapping the data into a DMA-able
4717 * area, filling in the S/G table, and finally
4718 * writing the taskfile to hardware, starting the command.
4719 *
4720 * LOCKING:
4721 * spin_lock_irqsave(host lock)
4722 */
4723 void ata_qc_issue(struct ata_queued_cmd *qc)
4724 {
4725 struct ata_port *ap = qc->ap;
4726 struct ata_link *link = qc->dev->link;
4727 u8 prot = qc->tf.protocol;
4728
4729 /* Make sure only one non-NCQ command is outstanding. The
4730 * check is skipped for old EH because it reuses active qc to
4731 * request ATAPI sense.
4732 */
4733 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4734
4735 if (ata_is_ncq(prot)) {
4736 WARN_ON(link->sactive & (1 << qc->tag));
4737
4738 if (!link->sactive)
4739 ap->nr_active_links++;
4740 link->sactive |= 1 << qc->tag;
4741 } else {
4742 WARN_ON(link->sactive);
4743
4744 ap->nr_active_links++;
4745 link->active_tag = qc->tag;
4746 }
4747
4748 qc->flags |= ATA_QCFLAG_ACTIVE;
4749 ap->qc_active |= 1 << qc->tag;
4750
4751 /* We guarantee to LLDs that they will have at least one
4752 * non-zero sg if the command is a data command.
4753 */
4754 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
4755
4756 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4757 (ap->flags & ATA_FLAG_PIO_DMA)))
4758 if (ata_sg_setup(qc))
4759 goto sg_err;
4760
4761 /* if device is sleeping, schedule reset and abort the link */
4762 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4763 link->eh_info.action |= ATA_EH_RESET;
4764 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4765 ata_link_abort(link);
4766 return;
4767 }
4768
4769 ap->ops->qc_prep(qc);
4770
4771 qc->err_mask |= ap->ops->qc_issue(qc);
4772 if (unlikely(qc->err_mask))
4773 goto err;
4774 return;
4775
4776 sg_err:
4777 qc->err_mask |= AC_ERR_SYSTEM;
4778 err:
4779 ata_qc_complete(qc);
4780 }
4781
4782 /**
4783 * sata_scr_valid - test whether SCRs are accessible
4784 * @link: ATA link to test SCR accessibility for
4785 *
4786 * Test whether SCRs are accessible for @link.
4787 *
4788 * LOCKING:
4789 * None.
4790 *
4791 * RETURNS:
4792 * 1 if SCRs are accessible, 0 otherwise.
4793 */
4794 int sata_scr_valid(struct ata_link *link)
4795 {
4796 struct ata_port *ap = link->ap;
4797
4798 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
4799 }
4800
4801 /**
4802 * sata_scr_read - read SCR register of the specified port
4803 * @link: ATA link to read SCR for
4804 * @reg: SCR to read
4805 * @val: Place to store read value
4806 *
4807 * Read SCR register @reg of @link into *@val. This function is
4808 * guaranteed to succeed if @link is ap->link, the cable type of
4809 * the port is SATA and the port implements ->scr_read.
4810 *
4811 * LOCKING:
4812 * None if @link is ap->link. Kernel thread context otherwise.
4813 *
4814 * RETURNS:
4815 * 0 on success, negative errno on failure.
4816 */
4817 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4818 {
4819 if (ata_is_host_link(link)) {
4820 struct ata_port *ap = link->ap;
4821
4822 if (sata_scr_valid(link))
4823 return ap->ops->scr_read(ap, reg, val);
4824 return -EOPNOTSUPP;
4825 }
4826
4827 return sata_pmp_scr_read(link, reg, val);
4828 }
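/*
 * Illustrative example: reading SStatus to decode the negotiated
 * interface speed (SPD field, bits 7:4), much as sata_link_init_spd()
 * below does for SControl:
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 *		spd = (sstatus >> 4) & 0xf;	// 1: 1.5Gbps, 2: 3.0Gbps
 */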
4829
4830 /**
4831 * sata_scr_write - write SCR register of the specified port
4832 * @link: ATA link to write SCR for
4833 * @reg: SCR to write
4834 * @val: value to write
4835 *
4836 * Write @val to SCR register @reg of @link. This function is
4837 * guaranteed to succeed if @link is ap->link, the cable type of
4838 * the port is SATA and the port implements ->scr_write.
4839 *
4840 * LOCKING:
4841 * None if @link is ap->link. Kernel thread context otherwise.
4842 *
4843 * RETURNS:
4844 * 0 on success, negative errno on failure.
4845 */
4846 int sata_scr_write(struct ata_link *link, int reg, u32 val)
4847 {
4848 if (ata_is_host_link(link)) {
4849 struct ata_port *ap = link->ap;
4850
4851 if (sata_scr_valid(link))
4852 return ap->ops->scr_write(ap, reg, val);
4853 return -EOPNOTSUPP;
4854 }
4855
4856 return sata_pmp_scr_write(link, reg, val);
4857 }
4858
4859 /**
4860 * sata_scr_write_flush - write SCR register of the specified port and flush
4861 * @link: ATA link to write SCR for
4862 * @reg: SCR to write
4863 * @val: value to write
4864 *
4865 * This function is identical to sata_scr_write() except that it
4866 * performs a flush (read-back) after writing to the register.
4867 *
4868 * LOCKING:
4869 * None if @link is ap->link. Kernel thread context otherwise.
4870 *
4871 * RETURNS:
4872 * 0 on success, negative errno on failure.
4873 */
4874 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4875 {
4876 if (ata_is_host_link(link)) {
4877 struct ata_port *ap = link->ap;
4878 int rc;
4879
4880 if (sata_scr_valid(link)) {
4881 rc = ap->ops->scr_write(ap, reg, val);
4882 if (rc == 0)
4883 rc = ap->ops->scr_read(ap, reg, &val);
4884 return rc;
4885 }
4886 return -EOPNOTSUPP;
4887 }
4888
4889 return sata_pmp_scr_write(link, reg, val);
4890 }
4891
4892 /**
4893 * ata_link_online - test whether the given link is online
4894 * @link: ATA link to test
4895 *
4896 * Test whether @link is online. Note that this function returns
4897 * 0 if online status of @link cannot be obtained, so
4898 * ata_link_online(link) != !ata_link_offline(link).
4899 *
4900 * LOCKING:
4901 * None.
4902 *
4903 * RETURNS:
4904 * 1 if the link's online status is available and the link is online.
4905 */
4906 int ata_link_online(struct ata_link *link)
4907 {
4908 u32 sstatus;
4909
4910 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4911 (sstatus & 0xf) == 0x3)
4912 return 1;
4913 return 0;
4914 }
4915
4916 /**
4917 * ata_link_offline - test whether the given link is offline
4918 * @link: ATA link to test
4919 *
4920 * Test whether @link is offline. Note that this function
4921 * returns 0 if offline status of @link cannot be obtained, so
4922 * ata_link_online(link) != !ata_link_offline(link).
4923 *
4924 * LOCKING:
4925 * None.
4926 *
4927 * RETURNS:
4928 * 1 if the link's offline status is available and the link is offline.
4929 */
4930 int ata_link_offline(struct ata_link *link)
4931 {
4932 u32 sstatus;
4933
4934 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4935 (sstatus & 0xf) != 0x3)
4936 return 1;
4937 return 0;
4938 }
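/*
 * Note (summarizing the SATA spec): both helpers test the DET field
 * (bits 3:0) of SStatus.  0x0 means no device detected, 0x1 means a
 * device was detected but phy communication is not yet established,
 * 0x3 means device detected with phy communication established and
 * 0x4 means the phy is offline; hence "online" is DET == 0x3 and
 * "offline" is any readable value other than 0x3.
 */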
4939
4940 #ifdef CONFIG_PM
4941 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4942 unsigned int action, unsigned int ehi_flags,
4943 int wait)
4944 {
4945 unsigned long flags;
4946 int i, rc;
4947
4948 for (i = 0; i < host->n_ports; i++) {
4949 struct ata_port *ap = host->ports[i];
4950 struct ata_link *link;
4951
4952 /* Previous resume operation might still be in
4953 * progress. Wait for PM_PENDING to clear.
4954 */
4955 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4956 ata_port_wait_eh(ap);
4957 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4958 }
4959
4960 /* request PM ops to EH */
4961 spin_lock_irqsave(ap->lock, flags);
4962
4963 ap->pm_mesg = mesg;
4964 if (wait) {
4965 rc = 0;
4966 ap->pm_result = &rc;
4967 }
4968
4969 ap->pflags |= ATA_PFLAG_PM_PENDING;
4970 __ata_port_for_each_link(link, ap) {
4971 link->eh_info.action |= action;
4972 link->eh_info.flags |= ehi_flags;
4973 }
4974
4975 ata_port_schedule_eh(ap);
4976
4977 spin_unlock_irqrestore(ap->lock, flags);
4978
4979 /* wait and check result */
4980 if (wait) {
4981 ata_port_wait_eh(ap);
4982 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4983 if (rc)
4984 return rc;
4985 }
4986 }
4987
4988 return 0;
4989 }
4990
4991 /**
4992 * ata_host_suspend - suspend host
4993 * @host: host to suspend
4994 * @mesg: PM message
4995 *
4996 * Suspend @host. Actual operation is performed by EH. This
4997 * function requests EH to perform PM operations and waits for EH
4998 * to finish.
4999 *
5000 * LOCKING:
5001 * Kernel thread context (may sleep).
5002 *
5003 * RETURNS:
5004 * 0 on success, -errno on failure.
5005 */
5006 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5007 {
5008 int rc;
5009
5010 /*
5011 * disable link pm on all ports before requesting
5012 * any pm activity
5013 */
5014 ata_lpm_enable(host);
5015
5016 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5017 if (rc == 0)
5018 host->dev->power.power_state = mesg;
5019 return rc;
5020 }
5021
5022 /**
5023 * ata_host_resume - resume host
5024 * @host: host to resume
5025 *
5026 * Resume @host. Actual operation is performed by EH. This
5027 * function requests EH to perform PM operations and returns.
5028 * Note that all resume operations are performed in parallel.
5029 *
5030 * LOCKING:
5031 * Kernel thread context (may sleep).
5032 */
5033 void ata_host_resume(struct ata_host *host)
5034 {
5035 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5036 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5037 host->dev->power.power_state = PMSG_ON;
5038
5039 /* reenable link pm */
5040 ata_lpm_disable(host);
5041 }
5042 #endif
5043
5044 /**
5045 * ata_port_start - Set port up for dma.
5046 * @ap: Port to initialize
5047 *
5048 * Called just after data structures for each port are
5049 * initialized. Allocates space for the PRD table.
5050 *
5051 * May be used as the port_start() entry in ata_port_operations.
5052 *
5053 * LOCKING:
5054 * Inherited from caller.
5055 */
5056 int ata_port_start(struct ata_port *ap)
5057 {
5058 struct device *dev = ap->dev;
5059
5060 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5061 GFP_KERNEL);
5062 if (!ap->prd)
5063 return -ENOMEM;
5064
5065 return 0;
5066 }
5067
5068 /**
5069 * ata_dev_init - Initialize an ata_device structure
5070 * @dev: Device structure to initialize
5071 *
5072 * Initialize @dev in preparation for probing.
5073 *
5074 * LOCKING:
5075 * Inherited from caller.
5076 */
5077 void ata_dev_init(struct ata_device *dev)
5078 {
5079 struct ata_link *link = dev->link;
5080 struct ata_port *ap = link->ap;
5081 unsigned long flags;
5082
5083 /* SATA spd limit is bound to the first device */
5084 link->sata_spd_limit = link->hw_sata_spd_limit;
5085 link->sata_spd = 0;
5086
5087 /* High bits of dev->flags are used to record warm plug
5088 * requests which occur asynchronously. Synchronize using
5089 * host lock.
5090 */
5091 spin_lock_irqsave(ap->lock, flags);
5092 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5093 dev->horkage = 0;
5094 spin_unlock_irqrestore(ap->lock, flags);
5095
5096 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5097 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5098 dev->pio_mask = UINT_MAX;
5099 dev->mwdma_mask = UINT_MAX;
5100 dev->udma_mask = UINT_MAX;
5101 }
5102
5103 /**
5104 * ata_link_init - Initialize an ata_link structure
5105 * @ap: ATA port link is attached to
5106 * @link: Link structure to initialize
5107 * @pmp: Port multiplier port number
5108 *
5109 * Initialize @link.
5110 *
5111 * LOCKING:
5112 * Kernel thread context (may sleep)
5113 */
5114 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5115 {
5116 int i;
5117
5118 /* clear everything except for devices */
5119 memset(link, 0, offsetof(struct ata_link, device[0]));
5120
5121 link->ap = ap;
5122 link->pmp = pmp;
5123 link->active_tag = ATA_TAG_POISON;
5124 link->hw_sata_spd_limit = UINT_MAX;
5125
5126 /* can't use iterator, ap isn't initialized yet */
5127 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5128 struct ata_device *dev = &link->device[i];
5129
5130 dev->link = link;
5131 dev->devno = dev - link->device;
5132 ata_dev_init(dev);
5133 }
5134 }
5135
5136 /**
5137 * sata_link_init_spd - Initialize link->sata_spd_limit
5138 * @link: Link to configure sata_spd_limit for
5139 *
5140 * Initialize @link->[hw_]sata_spd_limit to the currently
5141 * configured value.
5142 *
5143 * LOCKING:
5144 * Kernel thread context (may sleep).
5145 *
5146 * RETURNS:
5147 * 0 on success, -errno on failure.
5148 */
5149 int sata_link_init_spd(struct ata_link *link)
5150 {
5151 u32 scontrol;
5152 u8 spd;
5153 int rc;
5154
5155 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
5156 if (rc)
5157 return rc;
5158
5159 spd = (scontrol >> 4) & 0xf;
5160 if (spd)
5161 link->hw_sata_spd_limit &= (1 << spd) - 1;
5162
5163 ata_force_spd_limit(link);
5164
5165 link->sata_spd_limit = link->hw_sata_spd_limit;
5166
5167 return 0;
5168 }
5169
5170 /**
5171 * ata_port_alloc - allocate and initialize basic ATA port resources
5172 * @host: ATA host this allocated port belongs to
5173 *
5174 * Allocate and initialize basic ATA port resources.
5175 *
5176 * RETURNS:
5177 * Allocated ATA port on success, NULL on failure.
5178 *
5179 * LOCKING:
5180 * Inherited from calling layer (may sleep).
5181 */
5182 struct ata_port *ata_port_alloc(struct ata_host *host)
5183 {
5184 struct ata_port *ap;
5185
5186 DPRINTK("ENTER\n");
5187
5188 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5189 if (!ap)
5190 return NULL;
5191
5192 ap->pflags |= ATA_PFLAG_INITIALIZING;
5193 ap->lock = &host->lock;
5194 ap->flags = ATA_FLAG_DISABLED;
5195 ap->print_id = -1;
5196 ap->ctl = ATA_DEVCTL_OBS;
5197 ap->host = host;
5198 ap->dev = host->dev;
5199 ap->last_ctl = 0xFF;
5200
5201 #if defined(ATA_VERBOSE_DEBUG)
5202 /* turn on all debugging levels */
5203 ap->msg_enable = 0x00FF;
5204 #elif defined(ATA_DEBUG)
5205 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5206 #else
5207 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5208 #endif
5209
5210 #ifdef CONFIG_ATA_SFF
5211 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5212 #endif
5213 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5214 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5215 INIT_LIST_HEAD(&ap->eh_done_q);
5216 init_waitqueue_head(&ap->eh_wait_q);
5217 init_timer_deferrable(&ap->fastdrain_timer);
5218 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5219 ap->fastdrain_timer.data = (unsigned long)ap;
5220
5221 ap->cbl = ATA_CBL_NONE;
5222
5223 ata_link_init(ap, &ap->link, 0);
5224
5225 #ifdef ATA_IRQ_TRAP
5226 ap->stats.unhandled_irq = 1;
5227 ap->stats.idle_irq = 1;
5228 #endif
5229 return ap;
5230 }
5231
5232 static void ata_host_release(struct device *gendev, void *res)
5233 {
5234 struct ata_host *host = dev_get_drvdata(gendev);
5235 int i;
5236
5237 for (i = 0; i < host->n_ports; i++) {
5238 struct ata_port *ap = host->ports[i];
5239
5240 if (!ap)
5241 continue;
5242
5243 if (ap->scsi_host)
5244 scsi_host_put(ap->scsi_host);
5245
5246 kfree(ap->pmp_link);
5247 kfree(ap);
5248 host->ports[i] = NULL;
5249 }
5250
5251 dev_set_drvdata(gendev, NULL);
5252 }
5253
5254 /**
5255 * ata_host_alloc - allocate and init basic ATA host resources
5256 * @dev: generic device this host is associated with
5257 * @max_ports: maximum number of ATA ports associated with this host
5258 *
5259 * Allocate and initialize basic ATA host resources. The LLD calls
5260 * this function to allocate a host, fully initializes it and then
5261 * attaches it using ata_host_register().
5262 *
5263 * @max_ports ports are allocated and host->n_ports is
5264 * initialized to @max_ports. The caller is allowed to decrease
5265 * host->n_ports before calling ata_host_register(). The unused
5266 * ports will be automatically freed on registration.
5267 *
5268 * RETURNS:
5269 * Allocated ATA host on success, NULL on failure.
5270 *
5271 * LOCKING:
5272 * Inherited from calling layer (may sleep).
5273 */
5274 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5275 {
5276 struct ata_host *host;
5277 size_t sz;
5278 int i;
5279
5280 DPRINTK("ENTER\n");
5281
5282 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5283 return NULL;
5284
5285 /* alloc a container for our list of ATA ports (buses) */
5286 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5288 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5289 if (!host)
5290 goto err_out;
5291
5292 devres_add(dev, host);
5293 dev_set_drvdata(dev, host);
5294
5295 spin_lock_init(&host->lock);
5296 host->dev = dev;
5297 host->n_ports = max_ports;
5298
5299 /* allocate ports bound to this host */
5300 for (i = 0; i < max_ports; i++) {
5301 struct ata_port *ap;
5302
5303 ap = ata_port_alloc(host);
5304 if (!ap)
5305 goto err_out;
5306
5307 ap->port_no = i;
5308 host->ports[i] = ap;
5309 }
5310
5311 devres_remove_group(dev, NULL);
5312 return host;
5313
5314 err_out:
5315 devres_release_group(dev, NULL);
5316 return NULL;
5317 }
5318
5319 /**
5320 * ata_host_alloc_pinfo - alloc host and init with port_info array
5321 * @dev: generic device this host is associated with
5322 * @ppi: array of ATA port_info to initialize host with
5323 * @n_ports: number of ATA ports attached to this host
5324 *
5325 * Allocate ATA host and initialize with info from @ppi. If NULL
5326 * terminated, @ppi may contain fewer entries than @n_ports. The
5327 * last entry will be used for the remaining ports.
5328 *
5329 * RETURNS:
5330 * Allocated ATA host on success, NULL on failure.
5331 *
5332 * LOCKING:
5333 * Inherited from calling layer (may sleep).
5334 */
5335 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5336 const struct ata_port_info * const * ppi,
5337 int n_ports)
5338 {
5339 const struct ata_port_info *pi;
5340 struct ata_host *host;
5341 int i, j;
5342
5343 host = ata_host_alloc(dev, n_ports);
5344 if (!host)
5345 return NULL;
5346
5347 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5348 struct ata_port *ap = host->ports[i];
5349
5350 if (ppi[j])
5351 pi = ppi[j++];
5352
5353 ap->pio_mask = pi->pio_mask;
5354 ap->mwdma_mask = pi->mwdma_mask;
5355 ap->udma_mask = pi->udma_mask;
5356 ap->flags |= pi->flags;
5357 ap->link.flags |= pi->link_flags;
5358 ap->ops = pi->port_ops;
5359
5360 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5361 host->ops = pi->port_ops;
5362 }
5363
5364 return host;
5365 }
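/*
 * Illustrative sketch (hypothetical LLD): a driver whose ports are
 * all alike typically passes a single, NULL-terminated port_info:
 *
 *	static const struct ata_port_info my_port_info = { // hypothetical
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= 0x1f,		// pio0-4
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,	// hypothetical ops table
 *	};
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 *
 * The early NULL terminator makes the one entry apply to both ports.
 */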
5366
5367 static void ata_host_stop(struct device *gendev, void *res)
5368 {
5369 struct ata_host *host = dev_get_drvdata(gendev);
5370 int i;
5371
5372 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5373
5374 for (i = 0; i < host->n_ports; i++) {
5375 struct ata_port *ap = host->ports[i];
5376
5377 if (ap->ops->port_stop)
5378 ap->ops->port_stop(ap);
5379 }
5380
5381 if (host->ops->host_stop)
5382 host->ops->host_stop(host);
5383 }
5384
5385 /**
5386 * ata_finalize_port_ops - finalize ata_port_operations
5387 * @ops: ata_port_operations to finalize
5388 *
5389 * An ata_port_operations can inherit from another ops and that
5390 * ops can again inherit from another. This can go on as many
5391 * times as necessary as long as there is no loop in the
5392 * inheritance chain.
5393 *
5394 * Ops tables are finalized when the host is started. NULL or
5395 * unspecified entries are inherited from the closest ancestor
5396 * which has the method and the entry is populated with it.
5397 * After finalization, the ops table directly points to all the
5398 * methods and ->inherits is no longer necessary and cleared.
5399 *
5400 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5401 *
5402 * LOCKING:
5403 * None.
5404 */
5405 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5406 {
5407 static DEFINE_SPINLOCK(lock);
5408 const struct ata_port_operations *cur;
5409 void **begin = (void **)ops;
5410 void **end = (void **)&ops->inherits;
5411 void **pp;
5412
5413 if (!ops || !ops->inherits)
5414 return;
5415
5416 spin_lock(&lock);
5417
5418 for (cur = ops->inherits; cur; cur = cur->inherits) {
5419 void **inherit = (void **)cur;
5420
5421 for (pp = begin; pp < end; pp++, inherit++)
5422 if (!*pp)
5423 *pp = *inherit;
5424 }
5425
5426 for (pp = begin; pp < end; pp++)
5427 if (IS_ERR(*pp))
5428 *pp = NULL;
5429
5430 ops->inherits = NULL;
5431
5432 spin_unlock(&lock);
5433 }
5434
5435 /**
5436 * ata_host_start - start and freeze ports of an ATA host
5437 * @host: ATA host to start ports for
5438 *
5439 * Start and then freeze ports of @host. Started status is
5440 * recorded in host->flags, so this function can be called
5441 * multiple times. Ports are guaranteed to get started only
5442 * once. If host->ops isn't initialized yet, it's set to the
5443 * first non-dummy port ops.
5444 *
5445 * LOCKING:
5446 * Inherited from calling layer (may sleep).
5447 *
5448 * RETURNS:
5449 * 0 if all ports are started successfully, -errno otherwise.
5450 */
5451 int ata_host_start(struct ata_host *host)
5452 {
5453 int have_stop = 0;
5454 void *start_dr = NULL;
5455 int i, rc;
5456
5457 if (host->flags & ATA_HOST_STARTED)
5458 return 0;
5459
5460 ata_finalize_port_ops(host->ops);
5461
5462 for (i = 0; i < host->n_ports; i++) {
5463 struct ata_port *ap = host->ports[i];
5464
5465 ata_finalize_port_ops(ap->ops);
5466
5467 if (!host->ops && !ata_port_is_dummy(ap))
5468 host->ops = ap->ops;
5469
5470 if (ap->ops->port_stop)
5471 have_stop = 1;
5472 }
5473
5474 if (host->ops->host_stop)
5475 have_stop = 1;
5476
5477 if (have_stop) {
5478 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5479 if (!start_dr)
5480 return -ENOMEM;
5481 }
5482
5483 for (i = 0; i < host->n_ports; i++) {
5484 struct ata_port *ap = host->ports[i];
5485
5486 if (ap->ops->port_start) {
5487 rc = ap->ops->port_start(ap);
5488 if (rc) {
5489 if (rc != -ENODEV)
5490 dev_printk(KERN_ERR, host->dev,
5491 "failed to start port %d "
5492 "(errno=%d)\n", i, rc);
5493 goto err_out;
5494 }
5495 }
5496 ata_eh_freeze_port(ap);
5497 }
5498
5499 if (start_dr)
5500 devres_add(host->dev, start_dr);
5501 host->flags |= ATA_HOST_STARTED;
5502 return 0;
5503
5504 err_out:
5505 while (--i >= 0) {
5506 struct ata_port *ap = host->ports[i];
5507
5508 if (ap->ops->port_stop)
5509 ap->ops->port_stop(ap);
5510 }
5511 devres_free(start_dr);
5512 return rc;
5513 }
5514
5515 /**
5516 * ata_host_init - Initialize a host struct
5517 * @host: host to initialize
5518 * @dev: device host is attached to
5519 * @flags: host flags
5520 * @ops: port_ops
5521 *
5522 * LOCKING:
5523 * PCI/etc. bus probe sem.
5524 *
5525 */
5526 /* KILLME - the only user left is ipr */
5527 void ata_host_init(struct ata_host *host, struct device *dev,
5528 unsigned long flags, struct ata_port_operations *ops)
5529 {
5530 spin_lock_init(&host->lock);
5531 host->dev = dev;
5532 host->flags = flags;
5533 host->ops = ops;
5534 }
5535
5536 /**
5537 * ata_host_register - register initialized ATA host
5538 * @host: ATA host to register
5539 * @sht: template for SCSI host
5540 *
5541 * Register initialized ATA host. @host is allocated using
5542 * ata_host_alloc() and fully initialized by LLD. This function
5543 * starts ports, registers @host with the ATA and SCSI layers and
5544 * probes registered devices.
5545 *
5546 * LOCKING:
5547 * Inherited from calling layer (may sleep).
5548 *
5549 * RETURNS:
5550 * 0 on success, -errno otherwise.
5551 */
5552 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5553 {
5554 int i, rc;
5555
5556 /* host must have been started */
5557 if (!(host->flags & ATA_HOST_STARTED)) {
5558 dev_printk(KERN_ERR, host->dev,
5559 "BUG: trying to register unstarted host\n");
5560 WARN_ON(1);
5561 return -EINVAL;
5562 }
5563
5564 /* Blow away unused ports. This happens when the LLD can't
5565 * determine the exact number of ports to allocate at
5566 * allocation time.
5567 */
5568 for (i = host->n_ports; host->ports[i]; i++)
5569 kfree(host->ports[i]);
5570
5571 /* give ports names and add SCSI hosts */
5572 for (i = 0; i < host->n_ports; i++)
5573 host->ports[i]->print_id = ata_print_id++;
5574
5575 rc = ata_scsi_add_hosts(host, sht);
5576 if (rc)
5577 return rc;
5578
5579 /* associate with ACPI nodes */
5580 ata_acpi_associate(host);
5581
5582 /* set cable, sata_spd_limit and report */
5583 for (i = 0; i < host->n_ports; i++) {
5584 struct ata_port *ap = host->ports[i];
5585 unsigned long xfer_mask;
5586
5587 /* set SATA cable type if still unset */
5588 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5589 ap->cbl = ATA_CBL_SATA;
5590
5591 /* init sata_spd_limit to the current value */
5592 sata_link_init_spd(&ap->link);
5593
5594 /* print per-port info to dmesg */
5595 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5596 ap->udma_mask);
5597
5598 if (!ata_port_is_dummy(ap)) {
5599 ata_port_printk(ap, KERN_INFO,
5600 "%cATA max %s %s\n",
5601 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5602 ata_mode_string(xfer_mask),
5603 ap->link.eh_info.desc);
5604 ata_ehi_clear_desc(&ap->link.eh_info);
5605 } else
5606 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5607 }
5608
5609 /* perform each probe synchronously */
5610 DPRINTK("probe begin\n");
5611 for (i = 0; i < host->n_ports; i++) {
5612 struct ata_port *ap = host->ports[i];
5613
5614 /* probe */
5615 if (ap->ops->error_handler) {
5616 struct ata_eh_info *ehi = &ap->link.eh_info;
5617 unsigned long flags;
5618
5619 ata_port_probe(ap);
5620
5621 /* kick EH for boot probing */
5622 spin_lock_irqsave(ap->lock, flags);
5623
5624 ehi->probe_mask |= ATA_ALL_DEVICES;
5625 ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
5626 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5627
5628 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5629 ap->pflags |= ATA_PFLAG_LOADING;
5630 ata_port_schedule_eh(ap);
5631
5632 spin_unlock_irqrestore(ap->lock, flags);
5633
5634 /* wait for EH to finish */
5635 ata_port_wait_eh(ap);
5636 } else {
5637 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5638 rc = ata_bus_probe(ap);
5639 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5640
5641 if (rc) {
5642 /* FIXME: do something useful here?
5643 * Current libata behavior will
5644 * tear down everything when
5645 * the module is removed
5646 * or the h/w is unplugged.
5647 */
5648 }
5649 }
5650 }
5651
5652 /* probes are done, now scan each port's disk(s) */
5653 DPRINTK("host probe begin\n");
5654 for (i = 0; i < host->n_ports; i++) {
5655 struct ata_port *ap = host->ports[i];
5656
5657 ata_scsi_scan_host(ap, 1);
5658 }
5659
5660 return 0;
5661 }
5662
5663 /**
5664 * ata_host_activate - start host, request IRQ and register it
5665 * @host: target ATA host
5666 * @irq: IRQ to request
5667 * @irq_handler: irq_handler used when requesting IRQ
5668 * @irq_flags: irq_flags used when requesting IRQ
5669 * @sht: scsi_host_template to use when registering the host
5670 *
5671 * After allocating an ATA host and initializing it, most libata
5672 * LLDs perform three steps to activate the host - start host,
5673 * request IRQ and register it. This helper takes the necessary
5674 * arguments and performs the three steps in one go.
5675 *
5676 * An invalid IRQ skips the IRQ registration and expects the host to
5677 * have set polling mode on the port. In this case, @irq_handler
5678 * should be NULL.
5679 *
5680 * LOCKING:
5681 * Inherited from calling layer (may sleep).
5682 *
5683 * RETURNS:
5684 * 0 on success, -errno otherwise.
5685 */
5686 int ata_host_activate(struct ata_host *host, int irq,
5687 irq_handler_t irq_handler, unsigned long irq_flags,
5688 struct scsi_host_template *sht)
5689 {
5690 int i, rc;
5691
5692 rc = ata_host_start(host);
5693 if (rc)
5694 return rc;
5695
5696 /* Special case for polling mode */
5697 if (!irq) {
5698 WARN_ON(irq_handler);
5699 return ata_host_register(host, sht);
5700 }
5701
5702 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5703 dev_driver_string(host->dev), host);
5704 if (rc)
5705 return rc;
5706
5707 for (i = 0; i < host->n_ports; i++)
5708 ata_port_desc(host->ports[i], "irq %d", irq);
5709
5710 rc = ata_host_register(host, sht);
5711 /* if failed, just free the IRQ and leave ports alone */
5712 if (rc)
5713 devm_free_irq(host->dev, irq, host);
5714
5715 return rc;
5716 }
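/*
 * Illustrative sketch (hypothetical PCI LLD): after allocating and
 * wiring up the host, the usual probe tail is a single call:
 *
 *	rc = ata_host_activate(host, pdev->irq, my_interrupt, // hypothetical handler
 *			       IRQF_SHARED, &my_sht);	      // hypothetical template
 *	if (rc)
 *		return rc;
 *
 * which starts the ports, requests the IRQ and registers the host
 * in one go, as described above.
 */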
5717
5718 /**
5719 * ata_port_detach - Detach ATA port in preparation for device removal
5720 * @ap: ATA port to be detached
5721 *
5722 * Detach all ATA devices and the associated SCSI devices of @ap;
5723 * then, remove the associated SCSI host. @ap is guaranteed to
5724 * be quiescent on return from this function.
5725 *
5726 * LOCKING:
5727 * Kernel thread context (may sleep).
5728 */
5729 static void ata_port_detach(struct ata_port *ap)
5730 {
5731 unsigned long flags;
5732 struct ata_link *link;
5733 struct ata_device *dev;
5734
5735 if (!ap->ops->error_handler)
5736 goto skip_eh;
5737
5738 /* tell EH we're leaving & flush EH */
5739 spin_lock_irqsave(ap->lock, flags);
5740 ap->pflags |= ATA_PFLAG_UNLOADING;
5741 spin_unlock_irqrestore(ap->lock, flags);
5742
5743 ata_port_wait_eh(ap);
5744
5745 /* EH is now guaranteed to see UNLOADING - EH context belongs
5746 * to us. Disable all existing devices.
5747 */
5748 ata_port_for_each_link(link, ap) {
5749 ata_link_for_each_dev(dev, link)
5750 ata_dev_disable(dev);
5751 }
5752
5753 /* Final freeze & EH. All in-flight commands are aborted. EH
5754 * will be skipped and retries will be terminated with bad
5755 * target.
5756 */
5757 spin_lock_irqsave(ap->lock, flags);
5758 ata_port_freeze(ap); /* won't be thawed */
5759 spin_unlock_irqrestore(ap->lock, flags);
5760
5761 ata_port_wait_eh(ap);
5762 cancel_rearming_delayed_work(&ap->hotplug_task);
5763
5764 skip_eh:
5765 /* remove the associated SCSI host */
5766 scsi_remove_host(ap->scsi_host);
5767 }
5768
5769 /**
5770 * ata_host_detach - Detach all ports of an ATA host
5771 * @host: Host to detach
5772 *
5773 * Detach all ports of @host.
5774 *
5775 * LOCKING:
5776 * Kernel thread context (may sleep).
5777 */
5778 void ata_host_detach(struct ata_host *host)
5779 {
5780 int i;
5781
5782 for (i = 0; i < host->n_ports; i++)
5783 ata_port_detach(host->ports[i]);
5784
5785 /* the host is dead now, dissociate ACPI */
5786 ata_acpi_dissociate(host);
5787 }
5788
5789 #ifdef CONFIG_PCI
5790
5791 /**
5792 * ata_pci_remove_one - PCI layer callback for device removal
5793 * @pdev: PCI device that was removed
5794 *
5795 * The PCI layer indicates to libata via this hook that a hot-unplug or
5796 * module unload event has occurred. Detach all ports. Resource
5797 * release is handled via devres.
5798 *
5799 * LOCKING:
5800 * Inherited from PCI layer (may sleep).
5801 */
5802 void ata_pci_remove_one(struct pci_dev *pdev)
5803 {
5804 struct device *dev = &pdev->dev;
5805 struct ata_host *host = dev_get_drvdata(dev);
5806
5807 ata_host_detach(host);
5808 }
5809
5810 /* move to PCI subsystem */
5811 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5812 {
5813 unsigned long tmp = 0;
5814
5815 switch (bits->width) {
5816 case 1: {
5817 u8 tmp8 = 0;
5818 pci_read_config_byte(pdev, bits->reg, &tmp8);
5819 tmp = tmp8;
5820 break;
5821 }
5822 case 2: {
5823 u16 tmp16 = 0;
5824 pci_read_config_word(pdev, bits->reg, &tmp16);
5825 tmp = tmp16;
5826 break;
5827 }
5828 case 4: {
5829 u32 tmp32 = 0;
5830 pci_read_config_dword(pdev, bits->reg, &tmp32);
5831 tmp = tmp32;
5832 break;
5833 }
5834
5835 default:
5836 return -EINVAL;
5837 }
5838
5839 tmp &= bits->mask;
5840
5841 return (tmp == bits->val) ? 1 : 0;
5842 }
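/*
 * Illustrative example: callers describe the config-space field to
 * probe with a struct pci_bits { reg, width (bytes), mask, val }.
 * Checking a hypothetical enable bit in an 8-bit register at offset
 * 0x41:
 *
 *	static const struct pci_bits my_enable_bit = { // hypothetical
 *		0x41, 1, 0x08, 0x08
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bit))
 *		return -ENOENT;	// port is disabled
 */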
5843
5844 #ifdef CONFIG_PM
5845 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5846 {
5847 pci_save_state(pdev);
5848 pci_disable_device(pdev);
5849
5850 if (mesg.event & PM_EVENT_SLEEP)
5851 pci_set_power_state(pdev, PCI_D3hot);
5852 }
5853
5854 int ata_pci_device_do_resume(struct pci_dev *pdev)
5855 {
5856 int rc;
5857
5858 pci_set_power_state(pdev, PCI_D0);
5859 pci_restore_state(pdev);
5860
5861 rc = pcim_enable_device(pdev);
5862 if (rc) {
5863 dev_printk(KERN_ERR, &pdev->dev,
5864 "failed to enable device after resume (%d)\n", rc);
5865 return rc;
5866 }
5867
5868 pci_set_master(pdev);
5869 return 0;
5870 }
5871
5872 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5873 {
5874 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5875 int rc = 0;
5876
5877 rc = ata_host_suspend(host, mesg);
5878 if (rc)
5879 return rc;
5880
5881 ata_pci_device_do_suspend(pdev, mesg);
5882
5883 return 0;
5884 }
5885
5886 int ata_pci_device_resume(struct pci_dev *pdev)
5887 {
5888 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5889 int rc;
5890
5891 rc = ata_pci_device_do_resume(pdev);
5892 if (rc == 0)
5893 ata_host_resume(host);
5894 return rc;
5895 }
5896 #endif /* CONFIG_PM */
5897
5898 #endif /* CONFIG_PCI */
5899
5900 static int __init ata_parse_force_one(char **cur,
5901 struct ata_force_ent *force_ent,
5902 const char **reason)
5903 {
5904 /* FIXME: Currently, there's no way to tag init const data and
5905 * using __initdata causes build failure on some versions of
5906 * gcc. Once __initdataconst is implemented, add const to the
5907 * following structure.
5908 */
5909 static struct ata_force_param force_tbl[] __initdata = {
5910 { "40c", .cbl = ATA_CBL_PATA40 },
5911 { "80c", .cbl = ATA_CBL_PATA80 },
5912 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
5913 { "unk", .cbl = ATA_CBL_PATA_UNK },
5914 { "ign", .cbl = ATA_CBL_PATA_IGN },
5915 { "sata", .cbl = ATA_CBL_SATA },
5916 { "1.5Gbps", .spd_limit = 1 },
5917 { "3.0Gbps", .spd_limit = 2 },
5918 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
5919 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
5920 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
5921 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
5922 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
5923 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
5924 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
5925 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
5926 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
5927 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
5928 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
5929 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
5930 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
5931 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
5932 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5933 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5934 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5935 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5936 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5937 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5938 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5939 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5940 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5941 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5942 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5943 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5944 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5945 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5946 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5947 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5948 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5949 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5950 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5951 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5952 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5953 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
5954 };
5955 char *start = *cur, *p = *cur;
5956 char *id, *val, *endp;
5957 const struct ata_force_param *match_fp = NULL;
5958 int nr_matches = 0, i;
5959
5960 /* find where this param ends and update *cur */
5961 while (*p != '\0' && *p != ',')
5962 p++;
5963
5964 if (*p == '\0')
5965 *cur = p;
5966 else
5967 *cur = p + 1;
5968
5969 *p = '\0';
5970
5971 /* parse */
5972 p = strchr(start, ':');
5973 if (!p) {
5974 val = strstrip(start);
5975 goto parse_val;
5976 }
5977 *p = '\0';
5978
5979 id = strstrip(start);
5980 val = strstrip(p + 1);
5981
5982 /* parse id */
5983 p = strchr(id, '.');
5984 if (p) {
5985 *p++ = '\0';
5986 force_ent->device = simple_strtoul(p, &endp, 10);
5987 if (p == endp || *endp != '\0') {
5988 *reason = "invalid device";
5989 return -EINVAL;
5990 }
5991 }
5992
5993 force_ent->port = simple_strtoul(id, &endp, 10);
5994 	if (id == endp || *endp != '\0') {
5995 *reason = "invalid port/link";
5996 return -EINVAL;
5997 }
5998
5999 parse_val:
6000 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6001 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6002 const struct ata_force_param *fp = &force_tbl[i];
6003
6004 if (strncasecmp(val, fp->name, strlen(val)))
6005 continue;
6006
6007 nr_matches++;
6008 match_fp = fp;
6009
6010 if (strcasecmp(val, fp->name) == 0) {
6011 nr_matches = 1;
6012 break;
6013 }
6014 }
6015
6016 if (!nr_matches) {
6017 *reason = "unknown value";
6018 return -EINVAL;
6019 }
6020 if (nr_matches > 1) {
6021 		*reason = "ambiguous value";
6022 return -EINVAL;
6023 }
6024
6025 force_ent->param = *match_fp;
6026
6027 return 0;
6028 }
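/*
 * Example force parameters accepted by this parser (passed as
 * libata.force= on the kernel command line; see
 * Documentation/kernel-parameters.txt for the authoritative syntax):
 *
 *	libata.force=3.0Gbps		apply to every port
 *	libata.force=2:noncq		port 2 only
 *	libata.force=2.00:udma/33	device 0 on port 2
 *	libata.force=1:1.5Gbps,2:noncq	comma-separated list of entries
 */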
6029
6030 static void __init ata_parse_force_param(void)
6031 {
6032 int idx = 0, size = 1;
6033 int last_port = -1, last_device = -1;
6034 char *p, *cur, *next;
6035
6036 /* calculate maximum number of params and allocate force_tbl */
6037 for (p = ata_force_param_buf; *p; p++)
6038 if (*p == ',')
6039 size++;
6040
6041 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6042 if (!ata_force_tbl) {
6043 		printk(KERN_WARNING "ata: failed to allocate force table, "
6044 		       "libata.force ignored\n");
6045 return;
6046 }
6047
6048 /* parse and populate the table */
6049 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6050 const char *reason = "";
6051 struct ata_force_ent te = { .port = -1, .device = -1 };
6052
6053 next = cur;
6054 if (ata_parse_force_one(&next, &te, &reason)) {
6055 printk(KERN_WARNING "ata: failed to parse force "
6056 "parameter \"%s\" (%s)\n",
6057 cur, reason);
6058 continue;
6059 }
6060
6061 if (te.port == -1) {
6062 te.port = last_port;
6063 te.device = last_device;
6064 }
6065
6066 ata_force_tbl[idx++] = te;
6067
6068 last_port = te.port;
6069 last_device = te.device;
6070 }
6071
6072 ata_force_tbl_size = idx;
6073 }
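/*
 * Worked example: "2:noncq,3.0Gbps" produces two force_tbl entries.
 * The first matches port 2 and sets horkage_on = ATA_HORKAGE_NONCQ;
 * the second carries no ID of its own, so it inherits port 2 via
 * last_port/last_device above and additionally caps that port's link
 * speed at 3.0Gbps.
 */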
6074
6075 static int __init ata_init(void)
6076 {
6077 ata_parse_force_param();
6078
6079 ata_wq = create_workqueue("ata");
6080 if (!ata_wq)
6081 return -ENOMEM;
6082
6083 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6084 if (!ata_aux_wq) {
6085 destroy_workqueue(ata_wq);
6086 return -ENOMEM;
6087 }
6088
6089 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6090 return 0;
6091 }
6092
6093 static void __exit ata_exit(void)
6094 {
6095 kfree(ata_force_tbl);
6096 destroy_workqueue(ata_wq);
6097 destroy_workqueue(ata_aux_wq);
6098 }
6099
6100 subsys_initcall(ata_init);
6101 module_exit(ata_exit);
6102
6103 static unsigned long ratelimit_time;
6104 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6105
6106 int ata_ratelimit(void)
6107 {
6108 int rc;
6109 unsigned long flags;
6110
6111 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6112
6113 if (time_after(jiffies, ratelimit_time)) {
6114 rc = 1;
6115 ratelimit_time = jiffies + (HZ/5);
6116 } else
6117 rc = 0;
6118
6119 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6120
6121 return rc;
6122 }
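/*
 * Example (illustrative): callers wrap noisy diagnostics with
 * ata_ratelimit() to throttle them to at most ~5 lines per second
 * (one per HZ/5 jiffies window):
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */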
6123
6124 /**
6125 * ata_wait_register - wait until register value changes
6126 * @reg: IO-mapped register
6127 * @mask: Mask to apply to read register value
6128 * @val: Wait condition
6129 * @interval: polling interval in milliseconds
6130 * @timeout: timeout in milliseconds
6131 *
6132 * Waiting for some bits of register to change is a common
6133 * operation for ATA controllers. This function reads 32bit LE
6134 * IO-mapped register @reg and tests for the following condition.
6135 *
6136 * (*@reg & mask) != val
6137 *
6138 * If the condition is met, it returns; otherwise, the process is
6139 * repeated after @interval until timeout.
6140 *
6141 * LOCKING:
6142 * Kernel thread context (may sleep)
6143 *
6144 * RETURNS:
6145 * The final register value.
6146 */
6147 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6148 unsigned long interval, unsigned long timeout)
6149 {
6150 unsigned long deadline;
6151 u32 tmp;
6152
6153 tmp = ioread32(reg);
6154
6155 /* Calculate timeout _after_ the first read to make sure
6156 * preceding writes reach the controller before starting to
6157 * eat away the timeout.
6158 */
6159 deadline = ata_deadline(jiffies, timeout);
6160
6161 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6162 msleep(interval);
6163 tmp = ioread32(reg);
6164 }
6165
6166 return tmp;
6167 }
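/*
 * Example (illustrative sketch): poll every 10ms, for up to 500ms,
 * until a hypothetical MY_BUSY bit clears.  Passing val == MY_BUSY
 * makes the loop spin while the bit is still set:
 *
 *	status = ata_wait_register(mmio + MY_STATUS, MY_BUSY, MY_BUSY,
 *				   10, 500);
 *	if (status & MY_BUSY)
 *		return -EBUSY;	bit never cleared before timeout
 */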
6168
6169 /*
6170 * Dummy port_ops
6171 */
6172 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6173 {
6174 return AC_ERR_SYSTEM;
6175 }
6176
6177 static void ata_dummy_error_handler(struct ata_port *ap)
6178 {
6179 /* truly dummy */
6180 }
6181
6182 struct ata_port_operations ata_dummy_port_ops = {
6183 .qc_prep = ata_noop_qc_prep,
6184 .qc_issue = ata_dummy_qc_issue,
6185 .error_handler = ata_dummy_error_handler,
6186 };
6187
6188 const struct ata_port_info ata_dummy_port_info = {
6189 .port_ops = &ata_dummy_port_ops,
6190 };
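/*
 * Example (illustrative): multi-port controllers with a dead or
 * unwired port can pass ata_dummy_port_info for that slot; commands
 * issued to the dummy port fail immediately with AC_ERR_SYSTEM and
 * its error handler does nothing.  "my_port_info" is hypothetical.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &my_port_info, &ata_dummy_port_info };
 */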
6191
6192 /*
6193 * libata is essentially a library of internal helper functions for
6194 * low-level ATA host controller drivers. As such, the API/ABI is
6195 * likely to change as new drivers are added and updated.
6196 * Do not depend on ABI/API stability.
6197 */
6198 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6199 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6200 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6201 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6202 EXPORT_SYMBOL_GPL(sata_port_ops);
6203 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6204 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6205 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6206 EXPORT_SYMBOL_GPL(ata_host_init);
6207 EXPORT_SYMBOL_GPL(ata_host_alloc);
6208 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6209 EXPORT_SYMBOL_GPL(ata_host_start);
6210 EXPORT_SYMBOL_GPL(ata_host_register);
6211 EXPORT_SYMBOL_GPL(ata_host_activate);
6212 EXPORT_SYMBOL_GPL(ata_host_detach);
6213 EXPORT_SYMBOL_GPL(ata_sg_init);
6214 EXPORT_SYMBOL_GPL(ata_qc_complete);
6215 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6216 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6217 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6218 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6219 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6220 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6221 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6222 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6223 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6224 EXPORT_SYMBOL_GPL(ata_mode_string);
6225 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6226 EXPORT_SYMBOL_GPL(ata_port_start);
6227 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6228 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6229 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6230 EXPORT_SYMBOL_GPL(ata_port_probe);
6231 EXPORT_SYMBOL_GPL(ata_dev_disable);
6232 EXPORT_SYMBOL_GPL(sata_set_spd);
6233 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6234 EXPORT_SYMBOL_GPL(sata_link_debounce);
6235 EXPORT_SYMBOL_GPL(sata_link_resume);
6236 EXPORT_SYMBOL_GPL(ata_std_prereset);
6237 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6238 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6239 EXPORT_SYMBOL_GPL(ata_std_postreset);
6240 EXPORT_SYMBOL_GPL(ata_dev_classify);
6241 EXPORT_SYMBOL_GPL(ata_dev_pair);
6242 EXPORT_SYMBOL_GPL(ata_port_disable);
6243 EXPORT_SYMBOL_GPL(ata_ratelimit);
6244 EXPORT_SYMBOL_GPL(ata_wait_register);
6245 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6246 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6247 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6248 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6249 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6250 EXPORT_SYMBOL_GPL(sata_scr_valid);
6251 EXPORT_SYMBOL_GPL(sata_scr_read);
6252 EXPORT_SYMBOL_GPL(sata_scr_write);
6253 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6254 EXPORT_SYMBOL_GPL(ata_link_online);
6255 EXPORT_SYMBOL_GPL(ata_link_offline);
6256 #ifdef CONFIG_PM
6257 EXPORT_SYMBOL_GPL(ata_host_suspend);
6258 EXPORT_SYMBOL_GPL(ata_host_resume);
6259 #endif /* CONFIG_PM */
6260 EXPORT_SYMBOL_GPL(ata_id_string);
6261 EXPORT_SYMBOL_GPL(ata_id_c_string);
6262 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6263
6264 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6265 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6266 EXPORT_SYMBOL_GPL(ata_timing_compute);
6267 EXPORT_SYMBOL_GPL(ata_timing_merge);
6268 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6269
6270 #ifdef CONFIG_PCI
6271 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6272 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6273 #ifdef CONFIG_PM
6274 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6275 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6276 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6277 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6278 #endif /* CONFIG_PM */
6279 #endif /* CONFIG_PCI */
6280
6281 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6282 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6283 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6284 EXPORT_SYMBOL_GPL(ata_port_desc);
6285 #ifdef CONFIG_PCI
6286 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6287 #endif /* CONFIG_PCI */
6288 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6289 EXPORT_SYMBOL_GPL(ata_link_abort);
6290 EXPORT_SYMBOL_GPL(ata_port_abort);
6291 EXPORT_SYMBOL_GPL(ata_port_freeze);
6292 EXPORT_SYMBOL_GPL(sata_async_notification);
6293 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6294 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6295 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6296 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6297 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6298 EXPORT_SYMBOL_GPL(ata_do_eh);
6299 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6300
6301 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6302 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6303 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6304 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6305 EXPORT_SYMBOL_GPL(ata_cable_sata);