1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/time.h>
54 #include <linux/interrupt.h>
55 #include <linux/completion.h>
56 #include <linux/suspend.h>
57 #include <linux/workqueue.h>
58 #include <linux/scatterlist.h>
59 #include <linux/io.h>
60 #include <linux/async.h>
61 #include <linux/log2.h>
62 #include <linux/slab.h>
63 #include <linux/glob.h>
64 #include <scsi/scsi.h>
65 #include <scsi/scsi_cmnd.h>
66 #include <scsi/scsi_host.h>
67 #include <linux/libata.h>
68 #include <asm/byteorder.h>
69 #include <asm/unaligned.h>
70 #include <linux/cdrom.h>
71 #include <linux/ratelimit.h>
72 #include <linux/pm_runtime.h>
73 #include <linux/platform_device.h>
74
75 #define CREATE_TRACE_POINTS
76 #include <trace/events/libata.h>
77
78 #include "libata.h"
79 #include "libata-transport.h"
80
81 /* debounce timing parameters in msecs { interval, duration, timeout } */
82 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
83 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
84 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
85
86 const struct ata_port_operations ata_base_port_ops = {
87 .prereset = ata_std_prereset,
88 .postreset = ata_std_postreset,
89 .error_handler = ata_std_error_handler,
90 .sched_eh = ata_std_sched_eh,
91 .end_eh = ata_std_end_eh,
92 };
93
94 const struct ata_port_operations sata_port_ops = {
95 .inherits = &ata_base_port_ops,
96
97 .qc_defer = ata_std_qc_defer,
98 .hardreset = sata_std_hardreset,
99 };
100
101 static unsigned int ata_dev_init_params(struct ata_device *dev,
102 u16 heads, u16 sectors);
103 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
104 static void ata_dev_xfermask(struct ata_device *dev);
105 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
106
107 atomic_t ata_print_id = ATOMIC_INIT(0);
108
109 struct ata_force_param {
110 const char *name;
111 unsigned int cbl;
112 int spd_limit;
113 unsigned long xfer_mask;
114 unsigned int horkage_on;
115 unsigned int horkage_off;
116 unsigned int lflags;
117 };
118
119 struct ata_force_ent {
120 int port;
121 int device;
122 struct ata_force_param param;
123 };
124
125 static struct ata_force_ent *ata_force_tbl;
126 static int ata_force_tbl_size;
127
128 static char ata_force_param_buf[PAGE_SIZE] __initdata;
129 /* param_buf is thrown away after initialization, disallow read */
130 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
131 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
132
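/*
 * Illustrative only (not part of the original source): "force" is given
 * on the kernel command line as libata.force=[ID:]VAL[,[ID:]VAL...],
 * where ID is a port or port.device pair.  A hypothetical boot-time
 * override combining per-port and per-device entries could be:
 *
 *	libata.force=1:noncq,2.00:udma4,3:40c
 *
 * i.e. disable NCQ on port 1, limit device 0 on port 2 to UDMA/66 and
 * assume a 40-wire cable on port 3.  See Documentation/kernel-parameters.txt
 * for the authoritative list of accepted values.
 */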
133 static int atapi_enabled = 1;
134 module_param(atapi_enabled, int, 0444);
135 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
136
137 static int atapi_dmadir = 0;
138 module_param(atapi_dmadir, int, 0444);
139 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
140
141 int atapi_passthru16 = 1;
142 module_param(atapi_passthru16, int, 0444);
143 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
144
145 int libata_fua = 0;
146 module_param_named(fua, libata_fua, int, 0444);
147 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
148
149 static int ata_ignore_hpa;
150 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
151 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
152
153 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
154 module_param_named(dma, libata_dma_mask, int, 0444);
155 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
156
157 static int ata_probe_timeout;
158 module_param(ata_probe_timeout, int, 0444);
159 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
160
161 int libata_noacpi = 0;
162 module_param_named(noacpi, libata_noacpi, int, 0444);
163 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
164
165 int libata_allow_tpm = 0;
166 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
167 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
168
169 static int atapi_an;
170 module_param(atapi_an, int, 0444);
171 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
172
173 MODULE_AUTHOR("Jeff Garzik");
174 MODULE_DESCRIPTION("Library module for ATA devices");
175 MODULE_LICENSE("GPL");
176 MODULE_VERSION(DRV_VERSION);
177
178
179 static bool ata_sstatus_online(u32 sstatus)
180 {
181 return (sstatus & 0xf) == 0x3;
182 }
183
184 /**
185 * ata_link_next - link iteration helper
186 * @link: the previous link, NULL to start
187 * @ap: ATA port containing links to iterate
188 * @mode: iteration mode, one of ATA_LITER_*
189 *
190 * LOCKING:
191 * Host lock or EH context.
192 *
193 * RETURNS:
194 * Pointer to the next link.
195 */
196 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
197 enum ata_link_iter_mode mode)
198 {
199 BUG_ON(mode != ATA_LITER_EDGE &&
200 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
201
202 /* NULL link indicates start of iteration */
203 if (!link)
204 switch (mode) {
205 case ATA_LITER_EDGE:
206 case ATA_LITER_PMP_FIRST:
207 if (sata_pmp_attached(ap))
208 return ap->pmp_link;
209 /* fall through */
210 case ATA_LITER_HOST_FIRST:
211 return &ap->link;
212 }
213
214 /* we just iterated over the host link, what's next? */
215 if (link == &ap->link)
216 switch (mode) {
217 case ATA_LITER_HOST_FIRST:
218 if (sata_pmp_attached(ap))
219 return ap->pmp_link;
220 /* fall through */
221 case ATA_LITER_PMP_FIRST:
222 if (unlikely(ap->slave_link))
223 return ap->slave_link;
224 /* fall through */
225 case ATA_LITER_EDGE:
226 return NULL;
227 }
228
229 /* slave_link excludes PMP */
230 if (unlikely(link == ap->slave_link))
231 return NULL;
232
233 /* we were over a PMP link */
234 if (++link < ap->pmp_link + ap->nr_pmp_links)
235 return link;
236
237 if (mode == ATA_LITER_PMP_FIRST)
238 return &ap->link;
239
240 return NULL;
241 }
242
243 /**
244 * ata_dev_next - device iteration helper
245 * @dev: the previous device, NULL to start
246 * @link: ATA link containing devices to iterate
247 * @mode: iteration mode, one of ATA_DITER_*
248 *
249 * LOCKING:
250 * Host lock or EH context.
251 *
252 * RETURNS:
253 * Pointer to the next device.
254 */
255 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
256 enum ata_dev_iter_mode mode)
257 {
258 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
259 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
260
261 /* NULL dev indicates start of iteration */
262 if (!dev)
263 switch (mode) {
264 case ATA_DITER_ENABLED:
265 case ATA_DITER_ALL:
266 dev = link->device;
267 goto check;
268 case ATA_DITER_ENABLED_REVERSE:
269 case ATA_DITER_ALL_REVERSE:
270 dev = link->device + ata_link_max_devices(link) - 1;
271 goto check;
272 }
273
274 next:
275 /* move to the next one */
276 switch (mode) {
277 case ATA_DITER_ENABLED:
278 case ATA_DITER_ALL:
279 if (++dev < link->device + ata_link_max_devices(link))
280 goto check;
281 return NULL;
282 case ATA_DITER_ENABLED_REVERSE:
283 case ATA_DITER_ALL_REVERSE:
284 if (--dev >= link->device)
285 goto check;
286 return NULL;
287 }
288
289 check:
290 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
291 !ata_dev_enabled(dev))
292 goto next;
293 return dev;
294 }
295
296 /**
297 * ata_dev_phys_link - find physical link for a device
298 * @dev: ATA device to look up physical link for
299 *
300 * Look up the physical link to which @dev is attached.  Note that
301 * this is different from @dev->link only when @dev is on the slave
302 * link.  For all other cases, it's the same as @dev->link.
303 *
304 * LOCKING:
305 * Don't care.
306 *
307 * RETURNS:
308 * Pointer to the found physical link.
309 */
310 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
311 {
312 struct ata_port *ap = dev->link->ap;
313
314 if (!ap->slave_link)
315 return dev->link;
316 if (!dev->devno)
317 return &ap->link;
318 return ap->slave_link;
319 }
320
321 /**
322 * ata_force_cbl - force cable type according to libata.force
323 * @ap: ATA port of interest
324 *
325 * Force cable type according to libata.force and whine about it.
326 * The last entry which has matching port number is used, so it
327 * can be specified as part of device force parameters. For
328 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
329 * same effect.
330 *
331 * LOCKING:
332 * EH context.
333 */
334 void ata_force_cbl(struct ata_port *ap)
335 {
336 int i;
337
338 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
339 const struct ata_force_ent *fe = &ata_force_tbl[i];
340
341 if (fe->port != -1 && fe->port != ap->print_id)
342 continue;
343
344 if (fe->param.cbl == ATA_CBL_NONE)
345 continue;
346
347 ap->cbl = fe->param.cbl;
348 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
349 return;
350 }
351 }
352
353 /**
354 * ata_force_link_limits - force link limits according to libata.force
355 * @link: ATA link of interest
356 *
357 * Force link flags and SATA spd limit according to libata.force
358 * and whine about it. When only the port part is specified
359 * (e.g. 1:), the limit applies to both the host link and all
360 * fan-out links connected via PMP.  If the device part is
361 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
362 * link, not the host link.  Device number 15 always points to
363 * the host link whether a PMP is attached or not.  If the
364 * controller has a slave link, device number 16 points to it.
365 *
366 * LOCKING:
367 * EH context.
368 */
369 static void ata_force_link_limits(struct ata_link *link)
370 {
371 bool did_spd = false;
372 int linkno = link->pmp;
373 int i;
374
375 if (ata_is_host_link(link))
376 linkno += 15;
377
378 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
379 const struct ata_force_ent *fe = &ata_force_tbl[i];
380
381 if (fe->port != -1 && fe->port != link->ap->print_id)
382 continue;
383
384 if (fe->device != -1 && fe->device != linkno)
385 continue;
386
387 /* only honor the first spd limit */
388 if (!did_spd && fe->param.spd_limit) {
389 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
390 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
391 fe->param.name);
392 did_spd = true;
393 }
394
395 /* let lflags stack */
396 if (fe->param.lflags) {
397 link->flags |= fe->param.lflags;
398 ata_link_notice(link,
399 "FORCE: link flag 0x%x forced -> 0x%x\n",
400 fe->param.lflags, link->flags);
401 }
402 }
403 }
404
405 /**
406 * ata_force_xfermask - force xfermask according to libata.force
407 * @dev: ATA device of interest
408 *
409 * Force xfer_mask according to libata.force and whine about it.
410 * For consistency with link selection, device number 15 selects
411 * the first device connected to the host link.
412 *
413 * LOCKING:
414 * EH context.
415 */
416 static void ata_force_xfermask(struct ata_device *dev)
417 {
418 int devno = dev->link->pmp + dev->devno;
419 int alt_devno = devno;
420 int i;
421
422 /* allow n.15/16 for devices attached to host port */
423 if (ata_is_host_link(dev->link))
424 alt_devno += 15;
425
426 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
427 const struct ata_force_ent *fe = &ata_force_tbl[i];
428 unsigned long pio_mask, mwdma_mask, udma_mask;
429
430 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
431 continue;
432
433 if (fe->device != -1 && fe->device != devno &&
434 fe->device != alt_devno)
435 continue;
436
437 if (!fe->param.xfer_mask)
438 continue;
439
440 ata_unpack_xfermask(fe->param.xfer_mask,
441 &pio_mask, &mwdma_mask, &udma_mask);
442 if (udma_mask)
443 dev->udma_mask = udma_mask;
444 else if (mwdma_mask) {
445 dev->udma_mask = 0;
446 dev->mwdma_mask = mwdma_mask;
447 } else {
448 dev->udma_mask = 0;
449 dev->mwdma_mask = 0;
450 dev->pio_mask = pio_mask;
451 }
452
453 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
454 fe->param.name);
455 return;
456 }
457 }
458
459 /**
460 * ata_force_horkage - force horkage according to libata.force
461 * @dev: ATA device of interest
462 *
463 * Force horkage according to libata.force and whine about it.
464 * For consistency with link selection, device number 15 selects
465 * the first device connected to the host link.
466 *
467 * LOCKING:
468 * EH context.
469 */
470 static void ata_force_horkage(struct ata_device *dev)
471 {
472 int devno = dev->link->pmp + dev->devno;
473 int alt_devno = devno;
474 int i;
475
476 /* allow n.15/16 for devices attached to host port */
477 if (ata_is_host_link(dev->link))
478 alt_devno += 15;
479
480 for (i = 0; i < ata_force_tbl_size; i++) {
481 const struct ata_force_ent *fe = &ata_force_tbl[i];
482
483 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
484 continue;
485
486 if (fe->device != -1 && fe->device != devno &&
487 fe->device != alt_devno)
488 continue;
489
490 if (!(~dev->horkage & fe->param.horkage_on) &&
491 !(dev->horkage & fe->param.horkage_off))
492 continue;
493
494 dev->horkage |= fe->param.horkage_on;
495 dev->horkage &= ~fe->param.horkage_off;
496
497 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
498 fe->param.name);
499 }
500 }
501
502 /**
503 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
504 * @opcode: SCSI opcode
505 *
506 * Determine ATAPI command type from @opcode.
507 *
508 * LOCKING:
509 * None.
510 *
511 * RETURNS:
512 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
513 */
514 int atapi_cmd_type(u8 opcode)
515 {
516 switch (opcode) {
517 case GPCMD_READ_10:
518 case GPCMD_READ_12:
519 return ATAPI_READ;
520
521 case GPCMD_WRITE_10:
522 case GPCMD_WRITE_12:
523 case GPCMD_WRITE_AND_VERIFY_10:
524 return ATAPI_WRITE;
525
526 case GPCMD_READ_CD:
527 case GPCMD_READ_CD_MSF:
528 return ATAPI_READ_CD;
529
530 case ATA_16:
531 case ATA_12:
532 if (atapi_passthru16)
533 return ATAPI_PASS_THRU;
534 /* fall thru */
535 default:
536 return ATAPI_MISC;
537 }
538 }
539
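/*
 * Usage sketch (assumption, not taken verbatim from any driver): SCSI
 * translation code typically keys off the returned type when setting up
 * an ATAPI command, e.g.
 *
 *	int type = atapi_cmd_type(scmd->cmnd[0]);
 *	bool data_cmd = (type == ATAPI_READ || type == ATAPI_WRITE ||
 *			 type == ATAPI_READ_CD);
 *
 * where "scmd" is the struct scsi_cmnd being translated.
 */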
540 /**
541 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
542 * @tf: Taskfile to convert
543 * @pmp: Port multiplier port
544 * @is_cmd: This FIS is for command
545 * @fis: Buffer into which data will be output
546 *
547 * Converts a standard ATA taskfile to a Serial ATA
548 * FIS structure (Register - Host to Device).
549 *
550 * LOCKING:
551 * Inherited from caller.
552 */
553 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
554 {
555 fis[0] = 0x27; /* Register - Host to Device FIS */
556 fis[1] = pmp & 0xf; /* Port multiplier number */
557 if (is_cmd)
558 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
559
560 fis[2] = tf->command;
561 fis[3] = tf->feature;
562
563 fis[4] = tf->lbal;
564 fis[5] = tf->lbam;
565 fis[6] = tf->lbah;
566 fis[7] = tf->device;
567
568 fis[8] = tf->hob_lbal;
569 fis[9] = tf->hob_lbam;
570 fis[10] = tf->hob_lbah;
571 fis[11] = tf->hob_feature;
572
573 fis[12] = tf->nsect;
574 fis[13] = tf->hob_nsect;
575 fis[14] = 0;
576 fis[15] = tf->ctl;
577
578 fis[16] = tf->auxiliary & 0xff;
579 fis[17] = (tf->auxiliary >> 8) & 0xff;
580 fis[18] = (tf->auxiliary >> 16) & 0xff;
581 fis[19] = (tf->auxiliary >> 24) & 0xff;
582 }
583
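/*
 * Usage sketch (illustrative, modelled on how AHCI-style drivers fill
 * their command tables; "cmd_tbl" is a hypothetical buffer of at least
 * 20 bytes):
 *
 *	static void example_fill_cmd_fis(struct ata_queued_cmd *qc, u8 *cmd_tbl)
 *	{
 *		ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
 *	}
 *
 * The third argument marks the FIS as a Command FIS (bit 7 of byte 1).
 */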
584 /**
585 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
586 * @fis: Buffer from which data will be input
587 * @tf: Taskfile to output
588 *
589 * Converts a serial ATA FIS structure to a standard ATA taskfile.
590 *
591 * LOCKING:
592 * Inherited from caller.
593 */
594
595 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
596 {
597 tf->command = fis[2]; /* status */
598 tf->feature = fis[3]; /* error */
599
600 tf->lbal = fis[4];
601 tf->lbam = fis[5];
602 tf->lbah = fis[6];
603 tf->device = fis[7];
604
605 tf->hob_lbal = fis[8];
606 tf->hob_lbam = fis[9];
607 tf->hob_lbah = fis[10];
608
609 tf->nsect = fis[12];
610 tf->hob_nsect = fis[13];
611 }
612
613 static const u8 ata_rw_cmds[] = {
614 /* pio multi */
615 ATA_CMD_READ_MULTI,
616 ATA_CMD_WRITE_MULTI,
617 ATA_CMD_READ_MULTI_EXT,
618 ATA_CMD_WRITE_MULTI_EXT,
619 0,
620 0,
621 0,
622 ATA_CMD_WRITE_MULTI_FUA_EXT,
623 /* pio */
624 ATA_CMD_PIO_READ,
625 ATA_CMD_PIO_WRITE,
626 ATA_CMD_PIO_READ_EXT,
627 ATA_CMD_PIO_WRITE_EXT,
628 0,
629 0,
630 0,
631 0,
632 /* dma */
633 ATA_CMD_READ,
634 ATA_CMD_WRITE,
635 ATA_CMD_READ_EXT,
636 ATA_CMD_WRITE_EXT,
637 0,
638 0,
639 0,
640 ATA_CMD_WRITE_FUA_EXT
641 };
642
643 /**
644 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
645 * @tf: command to examine and configure
646 * @dev: device tf belongs to
647 *
648 * Examine the device configuration and tf->flags to calculate
649 * the proper read/write commands and protocol to use.
650 *
651 * LOCKING:
652 * caller.
653 */
654 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
655 {
656 u8 cmd;
657
658 int index, fua, lba48, write;
659
660 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
661 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
662 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
663
664 if (dev->flags & ATA_DFLAG_PIO) {
665 tf->protocol = ATA_PROT_PIO;
666 index = dev->multi_count ? 0 : 8;
667 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
668 /* Unable to use DMA due to host limitation */
669 tf->protocol = ATA_PROT_PIO;
670 index = dev->multi_count ? 0 : 8;
671 } else {
672 tf->protocol = ATA_PROT_DMA;
673 index = 16;
674 }
675
676 cmd = ata_rw_cmds[index + fua + lba48 + write];
677 if (cmd) {
678 tf->command = cmd;
679 return 0;
680 }
681 return -1;
682 }
683
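/*
 * Worked example (illustrative): the command is looked up as
 * ata_rw_cmds[index + fua + lba48 + write].  For a DMA-capable device
 * issuing an LBA48 FUA write that is
 *
 *	ata_rw_cmds[16 + 4 + 2 + 1] == ATA_CMD_WRITE_FUA_EXT
 *
 * while a PIO read on a device with multi_count set resolves to
 *
 *	ata_rw_cmds[0 + 0 + 0 + 0] == ATA_CMD_READ_MULTI
 *
 * The zero entries in the table are command/feature combinations that
 * do not exist, which is why ata_rwcmd_protocol() returns -1 for them.
 */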
684 /**
685 * ata_tf_read_block - Read block address from ATA taskfile
686 * @tf: ATA taskfile of interest
687 * @dev: ATA device @tf belongs to
688 *
689 * LOCKING:
690 * None.
691 *
692 * Read block address from @tf. This function can handle all
693 * three address formats - LBA, LBA48 and CHS. tf->protocol and
694 * flags select the address format to use.
695 *
696 * RETURNS:
697 * Block address read from @tf.
698 */
699 u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
700 {
701 u64 block = 0;
702
703 if (tf->flags & ATA_TFLAG_LBA) {
704 if (tf->flags & ATA_TFLAG_LBA48) {
705 block |= (u64)tf->hob_lbah << 40;
706 block |= (u64)tf->hob_lbam << 32;
707 block |= (u64)tf->hob_lbal << 24;
708 } else
709 block |= (tf->device & 0xf) << 24;
710
711 block |= tf->lbah << 16;
712 block |= tf->lbam << 8;
713 block |= tf->lbal;
714 } else {
715 u32 cyl, head, sect;
716
717 cyl = tf->lbam | (tf->lbah << 8);
718 head = tf->device & 0xf;
719 sect = tf->lbal;
720
721 if (!sect) {
722 ata_dev_warn(dev,
723 "device reported invalid CHS sector 0\n");
724 return U64_MAX;
725 }
726
727 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
728 }
729
730 return block;
731 }
732
733 /**
734 * ata_build_rw_tf - Build ATA taskfile for given read/write request
735 * @tf: Target ATA taskfile
736 * @dev: ATA device @tf belongs to
737 * @block: Block address
738 * @n_block: Number of blocks
739 * @tf_flags: RW/FUA etc...
740 * @tag: tag
741 *
742 * LOCKING:
743 * None.
744 *
745 * Build ATA taskfile @tf for read/write request described by
746 * @block, @n_block, @tf_flags and @tag on @dev.
747 *
748 * RETURNS:
749 *
750 * 0 on success, -ERANGE if the request is too large for @dev,
751 * -EINVAL if the request is invalid.
752 */
753 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
754 u64 block, u32 n_block, unsigned int tf_flags,
755 unsigned int tag)
756 {
757 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
758 tf->flags |= tf_flags;
759
760 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
761 /* yay, NCQ */
762 if (!lba_48_ok(block, n_block))
763 return -ERANGE;
764
765 tf->protocol = ATA_PROT_NCQ;
766 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
767
768 if (tf->flags & ATA_TFLAG_WRITE)
769 tf->command = ATA_CMD_FPDMA_WRITE;
770 else
771 tf->command = ATA_CMD_FPDMA_READ;
772
773 tf->nsect = tag << 3;
774 tf->hob_feature = (n_block >> 8) & 0xff;
775 tf->feature = n_block & 0xff;
776
777 tf->hob_lbah = (block >> 40) & 0xff;
778 tf->hob_lbam = (block >> 32) & 0xff;
779 tf->hob_lbal = (block >> 24) & 0xff;
780 tf->lbah = (block >> 16) & 0xff;
781 tf->lbam = (block >> 8) & 0xff;
782 tf->lbal = block & 0xff;
783
784 tf->device = ATA_LBA;
785 if (tf->flags & ATA_TFLAG_FUA)
786 tf->device |= 1 << 7;
787 } else if (dev->flags & ATA_DFLAG_LBA) {
788 tf->flags |= ATA_TFLAG_LBA;
789
790 if (lba_28_ok(block, n_block)) {
791 /* use LBA28 */
792 tf->device |= (block >> 24) & 0xf;
793 } else if (lba_48_ok(block, n_block)) {
794 if (!(dev->flags & ATA_DFLAG_LBA48))
795 return -ERANGE;
796
797 /* use LBA48 */
798 tf->flags |= ATA_TFLAG_LBA48;
799
800 tf->hob_nsect = (n_block >> 8) & 0xff;
801
802 tf->hob_lbah = (block >> 40) & 0xff;
803 tf->hob_lbam = (block >> 32) & 0xff;
804 tf->hob_lbal = (block >> 24) & 0xff;
805 } else
806 /* request too large even for LBA48 */
807 return -ERANGE;
808
809 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
810 return -EINVAL;
811
812 tf->nsect = n_block & 0xff;
813
814 tf->lbah = (block >> 16) & 0xff;
815 tf->lbam = (block >> 8) & 0xff;
816 tf->lbal = block & 0xff;
817
818 tf->device |= ATA_LBA;
819 } else {
820 /* CHS */
821 u32 sect, head, cyl, track;
822
823 /* The request -may- be too large for CHS addressing. */
824 if (!lba_28_ok(block, n_block))
825 return -ERANGE;
826
827 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
828 return -EINVAL;
829
830 /* Convert LBA to CHS */
831 track = (u32)block / dev->sectors;
832 cyl = track / dev->heads;
833 head = track % dev->heads;
834 sect = (u32)block % dev->sectors + 1;
835
836 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
837 (u32)block, track, cyl, head, sect);
838
839 /* Check whether the converted CHS can fit.
840 Cylinder: 0-65535
841 Head: 0-15
842 Sector: 1-255 */
843 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
844 return -ERANGE;
845
846 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
847 tf->lbal = sect;
848 tf->lbam = cyl;
849 tf->lbah = cyl >> 8;
850 tf->device |= head;
851 }
852
853 return 0;
854 }
855
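/*
 * Usage sketch (assumption, simplified from the read/write translation
 * path): building a taskfile for a 4096-sector read at LBA 0x12345678
 * using hardware tag 5 might look like
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	if (ata_build_rw_tf(&tf, dev, 0x12345678ULL, 4096, 0, 5))
 *		return AC_ERR_INVALID;	(hypothetical error handling)
 *
 * On an NCQ-enabled device this selects ATA_CMD_FPDMA_READ, places the
 * tag in tf.nsect and splits the sector count across tf.feature and
 * tf.hob_feature.
 */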
856 /**
857 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
858 * @pio_mask: pio_mask
859 * @mwdma_mask: mwdma_mask
860 * @udma_mask: udma_mask
861 *
862 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
863 * unsigned int xfer_mask.
864 *
865 * LOCKING:
866 * None.
867 *
868 * RETURNS:
869 * Packed xfer_mask.
870 */
871 unsigned long ata_pack_xfermask(unsigned long pio_mask,
872 unsigned long mwdma_mask,
873 unsigned long udma_mask)
874 {
875 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
876 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
877 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
878 }
879
880 /**
881 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
882 * @xfer_mask: xfer_mask to unpack
883 * @pio_mask: resulting pio_mask
884 * @mwdma_mask: resulting mwdma_mask
885 * @udma_mask: resulting udma_mask
886 *
887 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
888 * Any NULL destination masks will be ignored.
889 */
890 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
891 unsigned long *mwdma_mask, unsigned long *udma_mask)
892 {
893 if (pio_mask)
894 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
895 if (mwdma_mask)
896 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
897 if (udma_mask)
898 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
899 }
900
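/*
 * Round-trip sketch (illustrative): the PIO, MWDMA and UDMA masks live
 * in disjoint bit ranges of the packed value, so packing and unpacking
 * are loss-free:
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * leaves pio == ATA_PIO4, mwdma == ATA_MWDMA2 and udma == ATA_UDMA6.
 */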
901 static const struct ata_xfer_ent {
902 int shift, bits;
903 u8 base;
904 } ata_xfer_tbl[] = {
905 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
906 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
907 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
908 { -1, },
909 };
910
911 /**
912 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
913 * @xfer_mask: xfer_mask of interest
914 *
915 * Return matching XFER_* value for @xfer_mask. Only the highest
916 * bit of @xfer_mask is considered.
917 *
918 * LOCKING:
919 * None.
920 *
921 * RETURNS:
922 * Matching XFER_* value, 0xff if no match found.
923 */
924 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
925 {
926 int highbit = fls(xfer_mask) - 1;
927 const struct ata_xfer_ent *ent;
928
929 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
930 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
931 return ent->base + highbit - ent->shift;
932 return 0xff;
933 }
934
935 /**
936 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
937 * @xfer_mode: XFER_* of interest
938 *
939 * Return matching xfer_mask for @xfer_mode.
940 *
941 * LOCKING:
942 * None.
943 *
944 * RETURNS:
945 * Matching xfer_mask, 0 if no match found.
946 */
947 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
948 {
949 const struct ata_xfer_ent *ent;
950
951 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
952 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
953 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
954 & ~((1 << ent->shift) - 1);
955 return 0;
956 }
957
958 /**
959 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
960 * @xfer_mode: XFER_* of interest
961 *
962 * Return matching xfer_shift for @xfer_mode.
963 *
964 * LOCKING:
965 * None.
966 *
967 * RETURNS:
968 * Matching xfer_shift, -1 if no match found.
969 */
970 int ata_xfer_mode2shift(unsigned long xfer_mode)
971 {
972 const struct ata_xfer_ent *ent;
973
974 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
975 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
976 return ent->shift;
977 return -1;
978 }
979
980 /**
981 * ata_mode_string - convert xfer_mask to string
982 * @xfer_mask: mask of bits supported; only highest bit counts.
983 *
984 * Determine the string which represents the highest speed
985 * (highest bit in @xfer_mask).
986 *
987 * LOCKING:
988 * None.
989 *
990 * RETURNS:
991 * Constant C string representing highest speed listed in
992 * @xfer_mask, or the constant C string "<n/a>".
993 */
994 const char *ata_mode_string(unsigned long xfer_mask)
995 {
996 static const char * const xfer_mode_str[] = {
997 "PIO0",
998 "PIO1",
999 "PIO2",
1000 "PIO3",
1001 "PIO4",
1002 "PIO5",
1003 "PIO6",
1004 "MWDMA0",
1005 "MWDMA1",
1006 "MWDMA2",
1007 "MWDMA3",
1008 "MWDMA4",
1009 "UDMA/16",
1010 "UDMA/25",
1011 "UDMA/33",
1012 "UDMA/44",
1013 "UDMA/66",
1014 "UDMA/100",
1015 "UDMA/133",
1016 "UDMA7",
1017 };
1018 int highbit;
1019
1020 highbit = fls(xfer_mask) - 1;
1021 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1022 return xfer_mode_str[highbit];
1023 return "<n/a>";
1024 }
1025
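/*
 * Illustrative conversion chain using the helpers above (values assume
 * the standard mode definitions from <linux/ata.h>):
 *
 *	u8 mode = ata_xfer_mask2mode(ata_pack_xfermask(0, 0, ATA_UDMA5));
 *
 * yields XFER_UDMA_5, ata_xfer_mode2mask(mode) gives back the mask of
 * UDMA/100 and every slower UDMA mode, and ata_mode_string() on that
 * mask prints as "UDMA/100".
 */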
1026 const char *sata_spd_string(unsigned int spd)
1027 {
1028 static const char * const spd_str[] = {
1029 "1.5 Gbps",
1030 "3.0 Gbps",
1031 "6.0 Gbps",
1032 };
1033
1034 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1035 return "<unknown>";
1036 return spd_str[spd - 1];
1037 }
1038
1039 /**
1040 * ata_dev_classify - determine device type based on ATA-spec signature
1041 * @tf: ATA taskfile register set for device to be identified
1042 *
1043 * Determine from taskfile register contents whether a device is
1044 * ATA or ATAPI, as per "Signature and persistence" section
1045 * of ATA/PI spec (volume 1, sect 5.14).
1046 *
1047 * LOCKING:
1048 * None.
1049 *
1050 * RETURNS:
1051 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1052 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1053 */
1054 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1055 {
1056 /* Apple's open source Darwin code hints that some devices only
1057 * put a proper signature into the LBA mid/high registers,
1058 * so we only check those.  It's sufficient for uniqueness.
1059 *
1060 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1061 * signatures for ATA and ATAPI devices attached on SerialATA,
1062 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1063 * spec has never mentioned using different signatures
1064 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
1065 * Multiplier specification began to use 0x69/0x96 to identify
1066 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1067 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
1068 * 0x69/0x96 shortly afterwards and described them as reserved
1069 * for SerialATA.
1070 *
1071 * We follow the current spec and consider that 0x69/0x96
1072 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1073 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1074 * SEMB signature. This is worked around in
1075 * ata_dev_read_id().
1076 */
1077 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1078 DPRINTK("found ATA device by sig\n");
1079 return ATA_DEV_ATA;
1080 }
1081
1082 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1083 DPRINTK("found ATAPI device by sig\n");
1084 return ATA_DEV_ATAPI;
1085 }
1086
1087 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1088 DPRINTK("found PMP device by sig\n");
1089 return ATA_DEV_PMP;
1090 }
1091
1092 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1093 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1094 return ATA_DEV_SEMB;
1095 }
1096
1097 if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1098 DPRINTK("found ZAC device by sig\n");
1099 return ATA_DEV_ZAC;
1100 }
1101
1102 DPRINTK("unknown device\n");
1103 return ATA_DEV_UNKNOWN;
1104 }
1105
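/*
 * Illustrative caller (names are placeholders): reset paths read the
 * device signature, convert it to a taskfile and classify it, e.g.
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_from_fis(signature_fis, &tf);
 *	switch (ata_dev_classify(&tf)) {
 *	case ATA_DEV_ATA:
 *		...
 *	case ATA_DEV_ATAPI:
 *		...
 *	}
 *
 * which mirrors how a hardreset/softreset handler decides what kind of
 * device answered.
 */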
1106 /**
1107 * ata_id_string - Convert IDENTIFY DEVICE page into string
1108 * @id: IDENTIFY DEVICE results we will examine
1109 * @s: string into which data is output
1110 * @ofs: offset into identify device page
1111 * @len: length of string to return. must be an even number.
1112 *
1113 * The strings in the IDENTIFY DEVICE page are broken up into
1114 * 16-bit chunks. Run through the string, and output each
1115 * 8-bit chunk linearly, regardless of platform.
1116 *
1117 * LOCKING:
1118 * caller.
1119 */
1120
1121 void ata_id_string(const u16 *id, unsigned char *s,
1122 unsigned int ofs, unsigned int len)
1123 {
1124 unsigned int c;
1125
1126 BUG_ON(len & 1);
1127
1128 while (len > 0) {
1129 c = id[ofs] >> 8;
1130 *s = c;
1131 s++;
1132
1133 c = id[ofs] & 0xff;
1134 *s = c;
1135 s++;
1136
1137 ofs++;
1138 len -= 2;
1139 }
1140 }
1141
1142 /**
1143 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1144 * @id: IDENTIFY DEVICE results we will examine
1145 * @s: string into which data is output
1146 * @ofs: offset into identify device page
1147 * @len: length of string to return. must be an odd number.
1148 *
1149 * This function is identical to ata_id_string except that it
1150 * trims trailing spaces and terminates the resulting string with
1151 * null. @len must be actual maximum length (even number) + 1.
1152 *
1153 * LOCKING:
1154 * caller.
1155 */
1156 void ata_id_c_string(const u16 *id, unsigned char *s,
1157 unsigned int ofs, unsigned int len)
1158 {
1159 unsigned char *p;
1160
1161 ata_id_string(id, s, ofs, len - 1);
1162
1163 p = s + strnlen(s, len - 1);
1164 while (p > s && p[-1] == ' ')
1165 p--;
1166 *p = '\0';
1167 }
1168
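/*
 * Typical use (sketch): extracting the model and firmware revision
 * strings from IDENTIFY data.  ATA_ID_PROD/ATA_ID_FW_REV and their
 * *_LEN companions are the word offsets/lengths from <linux/ata.h>:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *	unsigned char fwrev[ATA_ID_FW_REV_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *	ata_id_c_string(dev->id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
 */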
1169 static u64 ata_id_n_sectors(const u16 *id)
1170 {
1171 if (ata_id_has_lba(id)) {
1172 if (ata_id_has_lba48(id))
1173 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1174 else
1175 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1176 } else {
1177 if (ata_id_current_chs_valid(id))
1178 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1179 id[ATA_ID_CUR_SECTORS];
1180 else
1181 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1182 id[ATA_ID_SECTORS];
1183 }
1184 }
1185
1186 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1187 {
1188 u64 sectors = 0;
1189
1190 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1191 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1192 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1193 sectors |= (tf->lbah & 0xff) << 16;
1194 sectors |= (tf->lbam & 0xff) << 8;
1195 sectors |= (tf->lbal & 0xff);
1196
1197 return sectors;
1198 }
1199
1200 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1201 {
1202 u64 sectors = 0;
1203
1204 sectors |= (tf->device & 0x0f) << 24;
1205 sectors |= (tf->lbah & 0xff) << 16;
1206 sectors |= (tf->lbam & 0xff) << 8;
1207 sectors |= (tf->lbal & 0xff);
1208
1209 return sectors;
1210 }
1211
1212 /**
1213 * ata_read_native_max_address - Read native max address
1214 * @dev: target device
1215 * @max_sectors: out parameter for the result native max address
1216 *
1217 * Perform an LBA48 or LBA28 native size query upon the device in
1218 * question.
1219 *
1220 * RETURNS:
1221 * 0 on success, -EACCES if command is aborted by the drive.
1222 * -EIO on other errors.
1223 */
1224 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1225 {
1226 unsigned int err_mask;
1227 struct ata_taskfile tf;
1228 int lba48 = ata_id_has_lba48(dev->id);
1229
1230 ata_tf_init(dev, &tf);
1231
1232 /* always clear all address registers */
1233 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1234
1235 if (lba48) {
1236 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1237 tf.flags |= ATA_TFLAG_LBA48;
1238 } else
1239 tf.command = ATA_CMD_READ_NATIVE_MAX;
1240
1241 tf.protocol = ATA_PROT_NODATA;
1242 tf.device |= ATA_LBA;
1243
1244 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1245 if (err_mask) {
1246 ata_dev_warn(dev,
1247 "failed to read native max address (err_mask=0x%x)\n",
1248 err_mask);
1249 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1250 return -EACCES;
1251 return -EIO;
1252 }
1253
1254 if (lba48)
1255 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1256 else
1257 *max_sectors = ata_tf_to_lba(&tf) + 1;
1258 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1259 (*max_sectors)--;
1260 return 0;
1261 }
1262
1263 /**
1264 * ata_set_max_sectors - Set max sectors
1265 * @dev: target device
1266 * @new_sectors: new max sectors value to set for the device
1267 *
1268 * Set max sectors of @dev to @new_sectors.
1269 *
1270 * RETURNS:
1271 * 0 on success, -EACCES if command is aborted or denied (due to
1272 * previous non-volatile SET_MAX) by the drive. -EIO on other
1273 * errors.
1274 */
1275 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1276 {
1277 unsigned int err_mask;
1278 struct ata_taskfile tf;
1279 int lba48 = ata_id_has_lba48(dev->id);
1280
1281 new_sectors--;
1282
1283 ata_tf_init(dev, &tf);
1284
1285 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1286
1287 if (lba48) {
1288 tf.command = ATA_CMD_SET_MAX_EXT;
1289 tf.flags |= ATA_TFLAG_LBA48;
1290
1291 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1292 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1293 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1294 } else {
1295 tf.command = ATA_CMD_SET_MAX;
1296
1297 tf.device |= (new_sectors >> 24) & 0xf;
1298 }
1299
1300 tf.protocol = ATA_PROT_NODATA;
1301 tf.device |= ATA_LBA;
1302
1303 tf.lbal = (new_sectors >> 0) & 0xff;
1304 tf.lbam = (new_sectors >> 8) & 0xff;
1305 tf.lbah = (new_sectors >> 16) & 0xff;
1306
1307 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1308 if (err_mask) {
1309 ata_dev_warn(dev,
1310 "failed to set max address (err_mask=0x%x)\n",
1311 err_mask);
1312 if (err_mask == AC_ERR_DEV &&
1313 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1314 return -EACCES;
1315 return -EIO;
1316 }
1317
1318 return 0;
1319 }
1320
1321 /**
1322 * ata_hpa_resize - Resize a device with an HPA set
1323 * @dev: Device to resize
1324 *
1325 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1326 * it if required to the full size of the media. The caller must check
1327 * the drive has the HPA feature set enabled.
1328 *
1329 * RETURNS:
1330 * 0 on success, -errno on failure.
1331 */
1332 static int ata_hpa_resize(struct ata_device *dev)
1333 {
1334 struct ata_eh_context *ehc = &dev->link->eh_context;
1335 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1336 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1337 u64 sectors = ata_id_n_sectors(dev->id);
1338 u64 native_sectors;
1339 int rc;
1340
1341 /* do we need to do it? */
1342 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1343 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1344 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1345 return 0;
1346
1347 /* read native max address */
1348 rc = ata_read_native_max_address(dev, &native_sectors);
1349 if (rc) {
1350 /* If device aborted the command or HPA isn't going to
1351 * be unlocked, skip HPA resizing.
1352 */
1353 if (rc == -EACCES || !unlock_hpa) {
1354 ata_dev_warn(dev,
1355 "HPA support seems broken, skipping HPA handling\n");
1356 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1357
1358 /* we can continue if device aborted the command */
1359 if (rc == -EACCES)
1360 rc = 0;
1361 }
1362
1363 return rc;
1364 }
1365 dev->n_native_sectors = native_sectors;
1366
1367 /* nothing to do? */
1368 if (native_sectors <= sectors || !unlock_hpa) {
1369 if (!print_info || native_sectors == sectors)
1370 return 0;
1371
1372 if (native_sectors > sectors)
1373 ata_dev_info(dev,
1374 "HPA detected: current %llu, native %llu\n",
1375 (unsigned long long)sectors,
1376 (unsigned long long)native_sectors);
1377 else if (native_sectors < sectors)
1378 ata_dev_warn(dev,
1379 "native sectors (%llu) is smaller than sectors (%llu)\n",
1380 (unsigned long long)native_sectors,
1381 (unsigned long long)sectors);
1382 return 0;
1383 }
1384
1385 /* let's unlock HPA */
1386 rc = ata_set_max_sectors(dev, native_sectors);
1387 if (rc == -EACCES) {
1388 /* if device aborted the command, skip HPA resizing */
1389 ata_dev_warn(dev,
1390 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1391 (unsigned long long)sectors,
1392 (unsigned long long)native_sectors);
1393 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1394 return 0;
1395 } else if (rc)
1396 return rc;
1397
1398 /* re-read IDENTIFY data */
1399 rc = ata_dev_reread_id(dev, 0);
1400 if (rc) {
1401 ata_dev_err(dev,
1402 "failed to re-read IDENTIFY data after HPA resizing\n");
1403 return rc;
1404 }
1405
1406 if (print_info) {
1407 u64 new_sectors = ata_id_n_sectors(dev->id);
1408 ata_dev_info(dev,
1409 "HPA unlocked: %llu -> %llu, native %llu\n",
1410 (unsigned long long)sectors,
1411 (unsigned long long)new_sectors,
1412 (unsigned long long)native_sectors);
1413 }
1414
1415 return 0;
1416 }
1417
1418 /**
1419 * ata_dump_id - IDENTIFY DEVICE info debugging output
1420 * @id: IDENTIFY DEVICE page to dump
1421 *
1422 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1423 * page.
1424 *
1425 * LOCKING:
1426 * caller.
1427 */
1428
1429 static inline void ata_dump_id(const u16 *id)
1430 {
1431 DPRINTK("49==0x%04x "
1432 "53==0x%04x "
1433 "63==0x%04x "
1434 "64==0x%04x "
1435 "75==0x%04x \n",
1436 id[49],
1437 id[53],
1438 id[63],
1439 id[64],
1440 id[75]);
1441 DPRINTK("80==0x%04x "
1442 "81==0x%04x "
1443 "82==0x%04x "
1444 "83==0x%04x "
1445 "84==0x%04x \n",
1446 id[80],
1447 id[81],
1448 id[82],
1449 id[83],
1450 id[84]);
1451 DPRINTK("88==0x%04x "
1452 "93==0x%04x\n",
1453 id[88],
1454 id[93]);
1455 }
1456
1457 /**
1458 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1459 * @id: IDENTIFY data to compute xfer mask from
1460 *
1461 * Compute the xfermask for this device. This is not as trivial
1462 * as it seems if we must consider early devices correctly.
1463 *
1464 * FIXME: pre IDE drive timing (do we care ?).
1465 *
1466 * LOCKING:
1467 * None.
1468 *
1469 * RETURNS:
1470 * Computed xfermask
1471 */
1472 unsigned long ata_id_xfermask(const u16 *id)
1473 {
1474 unsigned long pio_mask, mwdma_mask, udma_mask;
1475
1476 /* Usual case. Word 53 indicates word 64 is valid */
1477 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1478 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1479 pio_mask <<= 3;
1480 pio_mask |= 0x7;
1481 } else {
1482 /* If word 64 isn't valid then Word 51 high byte holds
1483 * the PIO timing number for the maximum. Turn it into
1484 * a mask.
1485 */
1486 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1487 if (mode < 5) /* Valid PIO range */
1488 pio_mask = (2 << mode) - 1;
1489 else
1490 pio_mask = 1;
1491
1492 * But wait... there's more.  Design your standards by
1493 * committee and you too can get a free iordy field to
1494 * process.  However it's the speeds, not the modes, that
1495 * are supported.  Note that drivers using the timing API
1496 * will get this right anyway.
1497 */
1498 }
1499
1500 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1501
1502 if (ata_id_is_cfa(id)) {
1503 /*
1504 * Process compact flash extended modes
1505 */
1506 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1507 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1508
1509 if (pio)
1510 pio_mask |= (1 << 5);
1511 if (pio > 1)
1512 pio_mask |= (1 << 6);
1513 if (dma)
1514 mwdma_mask |= (1 << 3);
1515 if (dma > 1)
1516 mwdma_mask |= (1 << 4);
1517 }
1518
1519 udma_mask = 0;
1520 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1521 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1522
1523 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1524 }
1525
1526 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1527 {
1528 struct completion *waiting = qc->private_data;
1529
1530 complete(waiting);
1531 }
1532
1533 /**
1534 * ata_exec_internal_sg - execute libata internal command
1535 * @dev: Device to which the command is sent
1536 * @tf: Taskfile registers for the command and the result
1537 * @cdb: CDB for packet command
1538 * @dma_dir: Data transfer direction of the command
1539 * @sgl: sg list for the data buffer of the command
1540 * @n_elem: Number of sg entries
1541 * @timeout: Timeout in msecs (0 for default)
1542 *
1543 * Executes libata internal command with timeout. @tf contains
1544 * command on entry and result on return. Timeout and error
1545 * conditions are reported via return value. No recovery action
1546 * is taken after a command times out. It's caller's duty to
1547 * clean up after timeout.
1548 *
1549 * LOCKING:
1550 * None. Should be called with kernel context, might sleep.
1551 *
1552 * RETURNS:
1553 * Zero on success, AC_ERR_* mask on failure
1554 */
1555 unsigned ata_exec_internal_sg(struct ata_device *dev,
1556 struct ata_taskfile *tf, const u8 *cdb,
1557 int dma_dir, struct scatterlist *sgl,
1558 unsigned int n_elem, unsigned long timeout)
1559 {
1560 struct ata_link *link = dev->link;
1561 struct ata_port *ap = link->ap;
1562 u8 command = tf->command;
1563 int auto_timeout = 0;
1564 struct ata_queued_cmd *qc;
1565 unsigned int tag, preempted_tag;
1566 u32 preempted_sactive, preempted_qc_active;
1567 int preempted_nr_active_links;
1568 DECLARE_COMPLETION_ONSTACK(wait);
1569 unsigned long flags;
1570 unsigned int err_mask;
1571 int rc;
1572
1573 spin_lock_irqsave(ap->lock, flags);
1574
1575 /* no internal command while frozen */
1576 if (ap->pflags & ATA_PFLAG_FROZEN) {
1577 spin_unlock_irqrestore(ap->lock, flags);
1578 return AC_ERR_SYSTEM;
1579 }
1580
1581 /* initialize internal qc */
1582
1583 /* XXX: Tag 0 is used for drivers with legacy EH as some
1584 * drivers choke if any other tag is given. This breaks
1585 * ata_tag_internal() test for those drivers. Don't use new
1586 * EH stuff without converting to it.
1587 */
1588 if (ap->ops->error_handler)
1589 tag = ATA_TAG_INTERNAL;
1590 else
1591 tag = 0;
1592
1593 qc = __ata_qc_from_tag(ap, tag);
1594
1595 qc->tag = tag;
1596 qc->scsicmd = NULL;
1597 qc->ap = ap;
1598 qc->dev = dev;
1599 ata_qc_reinit(qc);
1600
1601 preempted_tag = link->active_tag;
1602 preempted_sactive = link->sactive;
1603 preempted_qc_active = ap->qc_active;
1604 preempted_nr_active_links = ap->nr_active_links;
1605 link->active_tag = ATA_TAG_POISON;
1606 link->sactive = 0;
1607 ap->qc_active = 0;
1608 ap->nr_active_links = 0;
1609
1610 /* prepare & issue qc */
1611 qc->tf = *tf;
1612 if (cdb)
1613 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1614
1615 /* some SATA bridges need us to indicate data xfer direction */
1616 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1617 dma_dir == DMA_FROM_DEVICE)
1618 qc->tf.feature |= ATAPI_DMADIR;
1619
1620 qc->flags |= ATA_QCFLAG_RESULT_TF;
1621 qc->dma_dir = dma_dir;
1622 if (dma_dir != DMA_NONE) {
1623 unsigned int i, buflen = 0;
1624 struct scatterlist *sg;
1625
1626 for_each_sg(sgl, sg, n_elem, i)
1627 buflen += sg->length;
1628
1629 ata_sg_init(qc, sgl, n_elem);
1630 qc->nbytes = buflen;
1631 }
1632
1633 qc->private_data = &wait;
1634 qc->complete_fn = ata_qc_complete_internal;
1635
1636 ata_qc_issue(qc);
1637
1638 spin_unlock_irqrestore(ap->lock, flags);
1639
1640 if (!timeout) {
1641 if (ata_probe_timeout)
1642 timeout = ata_probe_timeout * 1000;
1643 else {
1644 timeout = ata_internal_cmd_timeout(dev, command);
1645 auto_timeout = 1;
1646 }
1647 }
1648
1649 if (ap->ops->error_handler)
1650 ata_eh_release(ap);
1651
1652 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1653
1654 if (ap->ops->error_handler)
1655 ata_eh_acquire(ap);
1656
1657 ata_sff_flush_pio_task(ap);
1658
1659 if (!rc) {
1660 spin_lock_irqsave(ap->lock, flags);
1661
1662 /* We're racing with irq here. If we lose, the
1663 * following test prevents us from completing the qc
1664 * twice. If we win, the port is frozen and will be
1665 * cleaned up by ->post_internal_cmd().
1666 */
1667 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1668 qc->err_mask |= AC_ERR_TIMEOUT;
1669
1670 if (ap->ops->error_handler)
1671 ata_port_freeze(ap);
1672 else
1673 ata_qc_complete(qc);
1674
1675 if (ata_msg_warn(ap))
1676 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1677 command);
1678 }
1679
1680 spin_unlock_irqrestore(ap->lock, flags);
1681 }
1682
1683 /* do post_internal_cmd */
1684 if (ap->ops->post_internal_cmd)
1685 ap->ops->post_internal_cmd(qc);
1686
1687 /* perform minimal error analysis */
1688 if (qc->flags & ATA_QCFLAG_FAILED) {
1689 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1690 qc->err_mask |= AC_ERR_DEV;
1691
1692 if (!qc->err_mask)
1693 qc->err_mask |= AC_ERR_OTHER;
1694
1695 if (qc->err_mask & ~AC_ERR_OTHER)
1696 qc->err_mask &= ~AC_ERR_OTHER;
1697 }
1698
1699 /* finish up */
1700 spin_lock_irqsave(ap->lock, flags);
1701
1702 *tf = qc->result_tf;
1703 err_mask = qc->err_mask;
1704
1705 ata_qc_free(qc);
1706 link->active_tag = preempted_tag;
1707 link->sactive = preempted_sactive;
1708 ap->qc_active = preempted_qc_active;
1709 ap->nr_active_links = preempted_nr_active_links;
1710
1711 spin_unlock_irqrestore(ap->lock, flags);
1712
1713 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1714 ata_internal_cmd_timed_out(dev, command);
1715
1716 return err_mask;
1717 }
1718
1719 /**
1720 * ata_exec_internal - execute libata internal command
1721 * @dev: Device to which the command is sent
1722 * @tf: Taskfile registers for the command and the result
1723 * @cdb: CDB for packet command
1724 * @dma_dir: Data transfer direction of the command
1725 * @buf: Data buffer of the command
1726 * @buflen: Length of data buffer
1727 * @timeout: Timeout in msecs (0 for default)
1728 *
1729 * Wrapper around ata_exec_internal_sg() which takes simple
1730 * buffer instead of sg list.
1731 *
1732 * LOCKING:
1733 * None. Should be called with kernel context, might sleep.
1734 *
1735 * RETURNS:
1736 * Zero on success, AC_ERR_* mask on failure
1737 */
1738 unsigned ata_exec_internal(struct ata_device *dev,
1739 struct ata_taskfile *tf, const u8 *cdb,
1740 int dma_dir, void *buf, unsigned int buflen,
1741 unsigned long timeout)
1742 {
1743 struct scatterlist *psg = NULL, sg;
1744 unsigned int n_elem = 0;
1745
1746 if (dma_dir != DMA_NONE) {
1747 WARN_ON(!buf);
1748 sg_init_one(&sg, buf, buflen);
1749 psg = &sg;
1750 n_elem++;
1751 }
1752
1753 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1754 timeout);
1755 }
1756
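/*
 * Caller sketch (illustrative, loosely modelled on how libata issues
 * FLUSH CACHE internally): using the internal command machinery for a
 * simple non-data command.
 *
 *	static unsigned int example_flush_cache(struct ata_device *dev)
 *	{
 *		struct ata_taskfile tf;
 *
 *		ata_tf_init(dev, &tf);
 *		tf.command = ATA_CMD_FLUSH;
 *		tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
 *		tf.protocol = ATA_PROT_NODATA;
 *
 *		return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 *	}
 */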
1757 /**
1758 * ata_pio_need_iordy - check if iordy needed
1759 * @adev: ATA device
1760 *
1761 * Check if the current speed of the device requires IORDY. Used
1762 * by various controllers for chip configuration.
1763 */
1764 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1765 {
1766 /* Don't set IORDY if we're preparing for reset. IORDY may
1767 * lead to controller lock up on certain controllers if the
1768 * port is not occupied. See bko#11703 for details.
1769 */
1770 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1771 return 0;
1772 /* Controller doesn't support IORDY. Probably a pointless
1773 * check as the caller should know this.
1774 */
1775 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1776 return 0;
1777 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1778 if (ata_id_is_cfa(adev->id)
1779 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1780 return 0;
1781 /* For PIO3 and higher it is mandatory */
1782 if (adev->pio_mode > XFER_PIO_2)
1783 return 1;
1784 /* We turn it on when possible */
1785 if (ata_id_has_iordy(adev->id))
1786 return 1;
1787 return 0;
1788 }
1789
1790 /**
1791 * ata_pio_mask_no_iordy - Return the non IORDY mask
1792 * @adev: ATA device
1793 *
1794 * Compute the highest mode possible if we are not using iordy. Return
1795 * -1 if no iordy mode is available.
1796 */
1797 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1798 {
1799 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1800 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1801 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1802 /* Is the speed faster than the drive allows non IORDY ? */
1803 if (pio) {
1804 /* These are cycle times, not frequencies - watch the logic! */
1805 if (pio > 240) /* PIO2 is 240nS per cycle */
1806 return 3 << ATA_SHIFT_PIO;
1807 return 7 << ATA_SHIFT_PIO;
1808 }
1809 }
1810 return 3 << ATA_SHIFT_PIO;
1811 }
1812
1813 /**
1814 * ata_do_dev_read_id - default ID read method
1815 * @dev: device
1816 * @tf: proposed taskfile
1817 * @id: data buffer
1818 *
1819 * Issue the identify taskfile and hand back the buffer containing
1820 * identify data.  For some RAID controllers and for pre-ATA devices
1821 * this function is wrapped or replaced by the driver.
1822 */
1823 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1824 struct ata_taskfile *tf, u16 *id)
1825 {
1826 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1827 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1828 }
1829
1830 /**
1831 * ata_dev_read_id - Read ID data from the specified device
1832 * @dev: target device
1833 * @p_class: pointer to class of the target device (may be changed)
1834 * @flags: ATA_READID_* flags
1835 * @id: buffer to read IDENTIFY data into
1836 *
1837 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1838 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1839 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1840 * for pre-ATA4 drives.
1841 *
1842 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1843 * now we abort if we hit that case.
1844 *
1845 * LOCKING:
1846 * Kernel thread context (may sleep)
1847 *
1848 * RETURNS:
1849 * 0 on success, -errno otherwise.
1850 */
1851 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1852 unsigned int flags, u16 *id)
1853 {
1854 struct ata_port *ap = dev->link->ap;
1855 unsigned int class = *p_class;
1856 struct ata_taskfile tf;
1857 unsigned int err_mask = 0;
1858 const char *reason;
1859 bool is_semb = class == ATA_DEV_SEMB;
1860 int may_fallback = 1, tried_spinup = 0;
1861 int rc;
1862
1863 if (ata_msg_ctl(ap))
1864 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1865
1866 retry:
1867 ata_tf_init(dev, &tf);
1868
1869 switch (class) {
1870 case ATA_DEV_SEMB:
1871 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1872 case ATA_DEV_ATA:
1873 case ATA_DEV_ZAC:
1874 tf.command = ATA_CMD_ID_ATA;
1875 break;
1876 case ATA_DEV_ATAPI:
1877 tf.command = ATA_CMD_ID_ATAPI;
1878 break;
1879 default:
1880 rc = -ENODEV;
1881 reason = "unsupported class";
1882 goto err_out;
1883 }
1884
1885 tf.protocol = ATA_PROT_PIO;
1886
1887 /* Some devices choke if TF registers contain garbage. Make
1888 * sure those are properly initialized.
1889 */
1890 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1891
1892 /* Device presence detection is unreliable on some
1893 * controllers. Always poll IDENTIFY if available.
1894 */
1895 tf.flags |= ATA_TFLAG_POLLING;
1896
1897 if (ap->ops->read_id)
1898 err_mask = ap->ops->read_id(dev, &tf, id);
1899 else
1900 err_mask = ata_do_dev_read_id(dev, &tf, id);
1901
1902 if (err_mask) {
1903 if (err_mask & AC_ERR_NODEV_HINT) {
1904 ata_dev_dbg(dev, "NODEV after polling detection\n");
1905 return -ENOENT;
1906 }
1907
1908 if (is_semb) {
1909 ata_dev_info(dev,
1910 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1911 /* SEMB is not supported yet */
1912 *p_class = ATA_DEV_SEMB_UNSUP;
1913 return 0;
1914 }
1915
1916 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1917 /* Device or controller might have reported
1918 * the wrong device class. Give a shot at the
1919 * other IDENTIFY if the current one is
1920 * aborted by the device.
1921 */
1922 if (may_fallback) {
1923 may_fallback = 0;
1924
1925 if (class == ATA_DEV_ATA)
1926 class = ATA_DEV_ATAPI;
1927 else
1928 class = ATA_DEV_ATA;
1929 goto retry;
1930 }
1931
1932 /* Control reaches here iff the device aborted
1933 * both flavors of IDENTIFYs which happens
1934 * sometimes with phantom devices.
1935 */
1936 ata_dev_dbg(dev,
1937 "both IDENTIFYs aborted, assuming NODEV\n");
1938 return -ENOENT;
1939 }
1940
1941 rc = -EIO;
1942 reason = "I/O error";
1943 goto err_out;
1944 }
1945
1946 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1947 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1948 "class=%d may_fallback=%d tried_spinup=%d\n",
1949 class, may_fallback, tried_spinup);
1950 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1951 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1952 }
1953
1954 /* Falling back doesn't make sense if ID data was read
1955 * successfully at least once.
1956 */
1957 may_fallback = 0;
1958
1959 swap_buf_le16(id, ATA_ID_WORDS);
1960
1961 /* sanity check */
1962 rc = -EINVAL;
1963 reason = "device reports invalid type";
1964
1965 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1966 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1967 goto err_out;
1968 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1969 ata_id_is_ata(id)) {
1970 ata_dev_dbg(dev,
1971 "host indicates ignore ATA devices, ignored\n");
1972 return -ENOENT;
1973 }
1974 } else {
1975 if (ata_id_is_ata(id))
1976 goto err_out;
1977 }
1978
1979 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1980 tried_spinup = 1;
1981 /*
1982 * Drive powered-up in standby mode, and requires a specific
1983 * SET_FEATURES spin-up subcommand before it will accept
1984 * anything other than the original IDENTIFY command.
1985 */
1986 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1987 if (err_mask && id[2] != 0x738c) {
1988 rc = -EIO;
1989 reason = "SPINUP failed";
1990 goto err_out;
1991 }
1992 /*
1993 * If the drive initially returned incomplete IDENTIFY info,
1994 * we now must reissue the IDENTIFY command.
1995 */
1996 if (id[2] == 0x37c8)
1997 goto retry;
1998 }
1999
2000 if ((flags & ATA_READID_POSTRESET) &&
2001 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2002 /*
2003 * The exact sequence expected by certain pre-ATA4 drives is:
2004 * SRST RESET
2005 * IDENTIFY (optional in early ATA)
2006 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2007 * anything else..
2008 * Some drives were very specific about that exact sequence.
2009 *
2010 * Note that ATA-4 says LBA is mandatory, so the second check
2011 * should never trigger.
2012 */
2013 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2014 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2015 if (err_mask) {
2016 rc = -EIO;
2017 reason = "INIT_DEV_PARAMS failed";
2018 goto err_out;
2019 }
2020
2021 /* current CHS translation info (id[53-58]) might be
2022 * changed. Reread the identify device info.
2023 */
2024 flags &= ~ATA_READID_POSTRESET;
2025 goto retry;
2026 }
2027 }
2028
2029 *p_class = class;
2030
2031 return 0;
2032
2033 err_out:
2034 if (ata_msg_warn(ap))
2035 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2036 reason, err_mask);
2037 return rc;
2038 }
2039
2040 static int ata_do_link_spd_horkage(struct ata_device *dev)
2041 {
2042 struct ata_link *plink = ata_dev_phys_link(dev);
2043 u32 target, target_limit;
2044
2045 if (!sata_scr_valid(plink))
2046 return 0;
2047
2048 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2049 target = 1;
2050 else
2051 return 0;
2052
2053 target_limit = (1 << target) - 1;
2054
2055 /* if already on stricter limit, no need to push further */
2056 if (plink->sata_spd_limit <= target_limit)
2057 return 0;
2058
2059 plink->sata_spd_limit = target_limit;
2060
2061 /* Request another EH round by returning -EAGAIN if link is
2062 * going faster than the target speed. Forward progress is
2063 * guaranteed by setting sata_spd_limit to target_limit above.
2064 */
2065 if (plink->sata_spd > target) {
2066 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2067 sata_spd_string(target));
2068 return -EAGAIN;
2069 }
2070 return 0;
2071 }
2072
2073 static inline u8 ata_dev_knobble(struct ata_device *dev)
2074 {
2075 struct ata_port *ap = dev->link->ap;
2076
2077 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2078 return 0;
2079
2080 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2081 }
2082
2083 static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2084 {
2085 struct ata_port *ap = dev->link->ap;
2086 unsigned int err_mask;
2087 int log_index = ATA_LOG_NCQ_SEND_RECV * 2;
2088 u16 log_pages;
2089
2090 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2091 0, ap->sector_buf, 1);
2092 if (err_mask) {
2093 ata_dev_dbg(dev,
2094 "failed to get Log Directory Emask 0x%x\n",
2095 err_mask);
2096 return;
2097 }
2098 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2099 if (!log_pages) {
2100 ata_dev_warn(dev,
2101 "NCQ Send/Recv Log not supported\n");
2102 return;
2103 }
2104 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2105 0, ap->sector_buf, 1);
2106 if (err_mask) {
2107 ata_dev_dbg(dev,
2108 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2109 err_mask);
2110 } else {
2111 u8 *cmds = dev->ncq_send_recv_cmds;
2112
2113 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2114 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2115
2116 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2117 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2118 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2119 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2120 }
2121 }
2122 }
2123
2124 static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2125 {
2126 struct ata_port *ap = dev->link->ap;
2127 unsigned int err_mask;
2128 int log_index = ATA_LOG_NCQ_NON_DATA * 2;
2129 u16 log_pages;
2130
2131 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2132 0, ap->sector_buf, 1);
2133 if (err_mask) {
2134 ata_dev_dbg(dev,
2135 "failed to get Log Directory Emask 0x%x\n",
2136 err_mask);
2137 return;
2138 }
2139 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2140 if (!log_pages) {
2141 ata_dev_warn(dev,
2142 "NCQ Non-Data Log not supported\n");
2143 return;
2144 }
2145 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2146 0, ap->sector_buf, 1);
2147 if (err_mask) {
2148 ata_dev_dbg(dev,
2149 "failed to get NCQ Non-Data Log Emask 0x%x\n",
2150 err_mask);
2151 } else {
2152 u8 *cmds = dev->ncq_non_data_cmds;
2153
2154 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2155 }
2156 }
2157
2158 static int ata_dev_config_ncq(struct ata_device *dev,
2159 char *desc, size_t desc_sz)
2160 {
2161 struct ata_port *ap = dev->link->ap;
2162 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2163 unsigned int err_mask;
2164 char *aa_desc = "";
2165
2166 if (!ata_id_has_ncq(dev->id)) {
2167 desc[0] = '\0';
2168 return 0;
2169 }
2170 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2171 snprintf(desc, desc_sz, "NCQ (not used)");
2172 return 0;
2173 }
2174 if (ap->flags & ATA_FLAG_NCQ) {
2175 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2176 dev->flags |= ATA_DFLAG_NCQ;
2177 }
2178
2179 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2180 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2181 ata_id_has_fpdma_aa(dev->id)) {
2182 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2183 SATA_FPDMA_AA);
2184 if (err_mask) {
2185 ata_dev_err(dev,
2186 "failed to enable AA (error_mask=0x%x)\n",
2187 err_mask);
2188 if (err_mask != AC_ERR_DEV) {
2189 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2190 return -EIO;
2191 }
2192 } else
2193 aa_desc = ", AA";
2194 }
2195
2196 if (hdepth >= ddepth)
2197 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2198 else
2199 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2200 ddepth, aa_desc);
2201
2202 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2203 if (ata_id_has_ncq_send_and_recv(dev->id))
2204 ata_dev_config_ncq_send_recv(dev);
2205 if (ata_id_has_ncq_non_data(dev->id))
2206 ata_dev_config_ncq_non_data(dev);
2207 }
2208
2209 return 0;
2210 }
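/*
 * The desc string built above ends up in the device info line printed
 * by ata_dev_configure(); a typical (illustrative) result is
 * "NCQ (depth 31/32), AA".
 */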
2211
2212 static void ata_dev_config_sense_reporting(struct ata_device *dev)
2213 {
2214 unsigned int err_mask;
2215
2216 if (!ata_id_has_sense_reporting(dev->id))
2217 return;
2218
2219 if (ata_id_sense_reporting_enabled(dev->id))
2220 return;
2221
2222 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2223 if (err_mask) {
2224 ata_dev_dbg(dev,
2225 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2226 err_mask);
2227 }
2228 }
2229
2230 static void ata_dev_config_zac(struct ata_device *dev)
2231 {
2232 struct ata_port *ap = dev->link->ap;
2233 unsigned int err_mask;
2234 u8 *identify_buf = ap->sector_buf;
2235 int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0;
2236 u16 log_pages;
2237
2238 dev->zac_zones_optimal_open = U32_MAX;
2239 dev->zac_zones_optimal_nonseq = U32_MAX;
2240 dev->zac_zones_max_open = U32_MAX;
2241
2242 /*
2243 * Always set the 'ZAC' flag for Host-managed devices.
2244 */
2245 if (dev->class == ATA_DEV_ZAC)
2246 dev->flags |= ATA_DFLAG_ZAC;
2247 else if (ata_id_zoned_cap(dev->id) == 0x01)
2248 /*
2249 * Check for host-aware devices.
2250 */
2251 dev->flags |= ATA_DFLAG_ZAC;
2252
2253 if (!(dev->flags & ATA_DFLAG_ZAC))
2254 return;
2255
2256 /*
2257 * Read Log Directory to figure out if IDENTIFY DEVICE log
2258 * is supported.
2259 */
2260 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2261 0, ap->sector_buf, 1);
2262 if (err_mask) {
2263 ata_dev_info(dev,
2264 "failed to get Log Directory Emask 0x%x\n",
2265 err_mask);
2266 return;
2267 }
2268 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2269 if (log_pages == 0) {
2270 ata_dev_warn(dev,
2271 "ATA Identify Device Log not supported\n");
2272 return;
2273 }
2274 /*
2275 * Read IDENTIFY DEVICE data log, page 0, to figure out
2276 * if page 9 is supported.
2277 */
2278 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0,
2279 identify_buf, 1);
2280 if (err_mask) {
2281 ata_dev_info(dev,
2282 "failed to get Device Identify Log Emask 0x%x\n",
2283 err_mask);
2284 return;
2285 }
2286 log_pages = identify_buf[8];
2287 for (i = 0; i < log_pages; i++) {
2288 if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) {
2289 found++;
2290 break;
2291 }
2292 }
2293 if (!found) {
2294 ata_dev_warn(dev,
2295 "ATA Zoned Information Log not supported\n");
2296 return;
2297 }
2298
2299 /*
2300 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2301 */
2302 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA,
2303 ATA_LOG_ZONED_INFORMATION,
2304 identify_buf, 1);
2305 if (!err_mask) {
2306 u64 zoned_cap, opt_open, opt_nonseq, max_open;
2307
2308 zoned_cap = get_unaligned_le64(&identify_buf[8]);
2309 if ((zoned_cap >> 63))
2310 dev->zac_zoned_cap = (zoned_cap & 1);
2311 opt_open = get_unaligned_le64(&identify_buf[24]);
2312 if ((opt_open >> 63))
2313 dev->zac_zones_optimal_open = (u32)opt_open;
2314 opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2315 if ((opt_nonseq >> 63))
2316 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2317 max_open = get_unaligned_le64(&identify_buf[40]);
2318 if ((max_open >> 63))
2319 dev->zac_zones_max_open = (u32)max_open;
2320 }
2321 }
2322
2323 /**
2324 * ata_dev_configure - Configure the specified ATA/ATAPI device
2325 * @dev: Target device to configure
2326 *
2327 * Configure @dev according to @dev->id. Generic and low-level
2328 * driver specific fixups are also applied.
2329 *
2330 * LOCKING:
2331 * Kernel thread context (may sleep)
2332 *
2333 * RETURNS:
2334 * 0 on success, -errno otherwise
2335 */
2336 int ata_dev_configure(struct ata_device *dev)
2337 {
2338 struct ata_port *ap = dev->link->ap;
2339 struct ata_eh_context *ehc = &dev->link->eh_context;
2340 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2341 const u16 *id = dev->id;
2342 unsigned long xfer_mask;
2343 unsigned int err_mask;
2344 char revbuf[7]; /* XYZ-99\0 */
2345 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2346 char modelbuf[ATA_ID_PROD_LEN+1];
2347 int rc;
2348
2349 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2350 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2351 return 0;
2352 }
2353
2354 if (ata_msg_probe(ap))
2355 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2356
2357 /* set horkage */
2358 dev->horkage |= ata_dev_blacklisted(dev);
2359 ata_force_horkage(dev);
2360
2361 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2362 ata_dev_info(dev, "unsupported device, disabling\n");
2363 ata_dev_disable(dev);
2364 return 0;
2365 }
2366
2367 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2368 dev->class == ATA_DEV_ATAPI) {
2369 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2370 atapi_enabled ? "not supported with this driver"
2371 : "disabled");
2372 ata_dev_disable(dev);
2373 return 0;
2374 }
2375
2376 rc = ata_do_link_spd_horkage(dev);
2377 if (rc)
2378 return rc;
2379
2380 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2381 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2382 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2383 dev->horkage |= ATA_HORKAGE_NOLPM;
2384
2385 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2386 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2387 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2388 }
2389
2390 /* let ACPI work its magic */
2391 rc = ata_acpi_on_devcfg(dev);
2392 if (rc)
2393 return rc;
2394
2395 /* massage HPA, do it early as it might change IDENTIFY data */
2396 rc = ata_hpa_resize(dev);
2397 if (rc)
2398 return rc;
2399
2400 /* print device capabilities */
2401 if (ata_msg_probe(ap))
2402 ata_dev_dbg(dev,
2403 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2404 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2405 __func__,
2406 id[49], id[82], id[83], id[84],
2407 id[85], id[86], id[87], id[88]);
2408
2409 /* initialize to-be-configured parameters */
2410 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2411 dev->max_sectors = 0;
2412 dev->cdb_len = 0;
2413 dev->n_sectors = 0;
2414 dev->cylinders = 0;
2415 dev->heads = 0;
2416 dev->sectors = 0;
2417 dev->multi_count = 0;
2418
2419 /*
2420 * common ATA, ATAPI feature tests
2421 */
2422
2423 /* find max transfer mode; for printk only */
2424 xfer_mask = ata_id_xfermask(id);
2425
2426 if (ata_msg_probe(ap))
2427 ata_dump_id(id);
2428
2429 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2430 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2431 sizeof(fwrevbuf));
2432
2433 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2434 sizeof(modelbuf));
2435
2436 /* ATA-specific feature tests */
2437 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2438 if (ata_id_is_cfa(id)) {
2439 /* CPRM may make this media unusable */
2440 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2441 ata_dev_warn(dev,
2442 "supports DRM functions and may not be fully accessible\n");
2443 snprintf(revbuf, 7, "CFA");
2444 } else {
2445 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2446 /* Warn the user if the device has TPM extensions */
2447 if (ata_id_has_tpm(id))
2448 ata_dev_warn(dev,
2449 "supports DRM functions and may not be fully accessible\n");
2450 }
2451
2452 dev->n_sectors = ata_id_n_sectors(id);
2453
2454 /* get current R/W Multiple count setting */
2455 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2456 unsigned int max = dev->id[47] & 0xff;
2457 unsigned int cnt = dev->id[59] & 0xff;
2458 /* only recognize/allow powers of two here */
2459 if (is_power_of_2(max) && is_power_of_2(cnt))
2460 if (cnt <= max)
2461 dev->multi_count = cnt;
2462 }
2463
2464 if (ata_id_has_lba(id)) {
2465 const char *lba_desc;
2466 char ncq_desc[24];
2467
2468 lba_desc = "LBA";
2469 dev->flags |= ATA_DFLAG_LBA;
2470 if (ata_id_has_lba48(id)) {
2471 dev->flags |= ATA_DFLAG_LBA48;
2472 lba_desc = "LBA48";
2473
2474 if (dev->n_sectors >= (1UL << 28) &&
2475 ata_id_has_flush_ext(id))
2476 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2477 }
2478
2479 /* config NCQ */
2480 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2481 if (rc)
2482 return rc;
2483
2484 /* print device info to dmesg */
2485 if (ata_msg_drv(ap) && print_info) {
2486 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2487 revbuf, modelbuf, fwrevbuf,
2488 ata_mode_string(xfer_mask));
2489 ata_dev_info(dev,
2490 "%llu sectors, multi %u: %s %s\n",
2491 (unsigned long long)dev->n_sectors,
2492 dev->multi_count, lba_desc, ncq_desc);
2493 }
2494 } else {
2495 /* CHS */
2496
2497 /* Default translation */
2498 dev->cylinders = id[1];
2499 dev->heads = id[3];
2500 dev->sectors = id[6];
2501
2502 if (ata_id_current_chs_valid(id)) {
2503 /* Current CHS translation is valid. */
2504 dev->cylinders = id[54];
2505 dev->heads = id[55];
2506 dev->sectors = id[56];
2507 }
2508
2509 /* print device info to dmesg */
2510 if (ata_msg_drv(ap) && print_info) {
2511 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2512 revbuf, modelbuf, fwrevbuf,
2513 ata_mode_string(xfer_mask));
2514 ata_dev_info(dev,
2515 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2516 (unsigned long long)dev->n_sectors,
2517 dev->multi_count, dev->cylinders,
2518 dev->heads, dev->sectors);
2519 }
2520 }
2521
2522 /* Check and mark DevSlp capability. Get DevSlp timing variables
2523 * from SATA Settings page of Identify Device Data Log.
2524 */
2525 if (ata_id_has_devslp(dev->id)) {
2526 u8 *sata_setting = ap->sector_buf;
2527 int i, j;
2528
2529 dev->flags |= ATA_DFLAG_DEVSLP;
2530 err_mask = ata_read_log_page(dev,
2531 ATA_LOG_SATA_ID_DEV_DATA,
2532 ATA_LOG_SATA_SETTINGS,
2533 sata_setting,
2534 1);
2535 if (err_mask)
2536 ata_dev_dbg(dev,
2537 "failed to get Identify Device Data, Emask 0x%x\n",
2538 err_mask);
2539 else
2540 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2541 j = ATA_LOG_DEVSLP_OFFSET + i;
2542 dev->devslp_timing[i] = sata_setting[j];
2543 }
2544 }
2545 ata_dev_config_sense_reporting(dev);
2546 ata_dev_config_zac(dev);
2547 dev->cdb_len = 16;
2548 }
2549
2550 /* ATAPI-specific feature tests */
2551 else if (dev->class == ATA_DEV_ATAPI) {
2552 const char *cdb_intr_string = "";
2553 const char *atapi_an_string = "";
2554 const char *dma_dir_string = "";
2555 u32 sntf;
2556
2557 rc = atapi_cdb_len(id);
2558 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2559 if (ata_msg_warn(ap))
2560 ata_dev_warn(dev, "unsupported CDB len\n");
2561 rc = -EINVAL;
2562 goto err_out_nosup;
2563 }
2564 dev->cdb_len = (unsigned int) rc;
2565
2566 /* Enable ATAPI AN if both the host and the device support
2567 * it. If a PMP is attached, SNTF is required
2568 * to enable ATAPI AN to discern between PHY status
2569 * changed notifications and ATAPI ANs.
2570 */
2571 if (atapi_an &&
2572 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2573 (!sata_pmp_attached(ap) ||
2574 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2575 /* issue SET feature command to turn this on */
2576 err_mask = ata_dev_set_feature(dev,
2577 SETFEATURES_SATA_ENABLE, SATA_AN);
2578 if (err_mask)
2579 ata_dev_err(dev,
2580 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2581 err_mask);
2582 else {
2583 dev->flags |= ATA_DFLAG_AN;
2584 atapi_an_string = ", ATAPI AN";
2585 }
2586 }
2587
2588 if (ata_id_cdb_intr(dev->id)) {
2589 dev->flags |= ATA_DFLAG_CDB_INTR;
2590 cdb_intr_string = ", CDB intr";
2591 }
2592
2593 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2594 dev->flags |= ATA_DFLAG_DMADIR;
2595 dma_dir_string = ", DMADIR";
2596 }
2597
2598 if (ata_id_has_da(dev->id)) {
2599 dev->flags |= ATA_DFLAG_DA;
2600 zpodd_init(dev);
2601 }
2602
2603 /* print device info to dmesg */
2604 if (ata_msg_drv(ap) && print_info)
2605 ata_dev_info(dev,
2606 "ATAPI: %s, %s, max %s%s%s%s\n",
2607 modelbuf, fwrevbuf,
2608 ata_mode_string(xfer_mask),
2609 cdb_intr_string, atapi_an_string,
2610 dma_dir_string);
2611 }
2612
2613 /* determine max_sectors */
2614 dev->max_sectors = ATA_MAX_SECTORS;
2615 if (dev->flags & ATA_DFLAG_LBA48)
2616 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2617
2618 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2619 200 sectors */
2620 if (ata_dev_knobble(dev)) {
2621 if (ata_msg_drv(ap) && print_info)
2622 ata_dev_info(dev, "applying bridge limits\n");
2623 dev->udma_mask &= ATA_UDMA5;
2624 dev->max_sectors = ATA_MAX_SECTORS;
2625 }
2626
2627 if ((dev->class == ATA_DEV_ATAPI) &&
2628 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2629 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2630 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2631 }
2632
2633 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2634 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2635 dev->max_sectors);
2636
2637 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2638 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2639 dev->max_sectors);
2640
2641 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2642 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2643
2644 if (ap->ops->dev_config)
2645 ap->ops->dev_config(dev);
2646
2647 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2648 /* Let the user know. We don't want to disallow opens for
2649 rescue purposes, or in case the vendor is just a blithering
2650 idiot. Do this after the dev_config call as some controllers
2651 with buggy firmware may want to avoid reporting false device
2652 bugs */
2653
2654 if (print_info) {
2655 ata_dev_warn(dev,
2656 "Drive reports diagnostics failure. This may indicate a drive\n");
2657 ata_dev_warn(dev,
2658 "fault or invalid emulation. Contact drive vendor for information.\n");
2659 }
2660 }
2661
2662 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2663 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2664 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2665 }
2666
2667 return 0;
2668
2669 err_out_nosup:
2670 if (ata_msg_probe(ap))
2671 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2672 return rc;
2673 }
2674
2675 /**
2676 * ata_cable_40wire - return 40 wire cable type
2677 * @ap: port
2678 *
2679 * Helper method for drivers which want to hardwire 40 wire cable
2680 * detection.
2681 */
2682
2683 int ata_cable_40wire(struct ata_port *ap)
2684 {
2685 return ATA_CBL_PATA40;
2686 }
2687
2688 /**
2689 * ata_cable_80wire - return 80 wire cable type
2690 * @ap: port
2691 *
2692 * Helper method for drivers which want to hardwire 80 wire cable
2693 * detection.
2694 */
2695
2696 int ata_cable_80wire(struct ata_port *ap)
2697 {
2698 return ATA_CBL_PATA80;
2699 }
2700
2701 /**
2702 * ata_cable_unknown - return unknown PATA cable.
2703 * @ap: port
2704 *
2705 * Helper method for drivers which have no PATA cable detection.
2706 */
2707
2708 int ata_cable_unknown(struct ata_port *ap)
2709 {
2710 return ATA_CBL_PATA_UNK;
2711 }
2712
2713 /**
2714 * ata_cable_ignore - return ignored PATA cable.
2715 * @ap: port
2716 *
2717 * Helper method for drivers which don't use cable type to limit
2718 * transfer mode.
2719 */
2720 int ata_cable_ignore(struct ata_port *ap)
2721 {
2722 return ATA_CBL_PATA_IGN;
2723 }
2724
2725 /**
2726 * ata_cable_sata - return SATA cable type
2727 * @ap: port
2728 *
2729 * Helper method for drivers which have SATA cables
2730 */
2731
2732 int ata_cable_sata(struct ata_port *ap)
2733 {
2734 return ATA_CBL_SATA;
2735 }
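
/*
 * A minimal sketch (hypothetical driver ops) of how an LLD typically
 * wires one of the cable helpers above into its port operations:
 *
 *	static struct ata_port_operations my_pata_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */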
2736
2737 /**
2738 * ata_bus_probe - Reset and probe ATA bus
2739 * @ap: Bus to probe
2740 *
2741 * Master ATA bus probing function. Initiates a hardware-dependent
2742 * bus reset, then attempts to identify any devices found on
2743 * the bus.
2744 *
2745 * LOCKING:
2746 * PCI/etc. bus probe sem.
2747 *
2748 * RETURNS:
2749 * Zero on success, negative errno otherwise.
2750 */
2751
2752 int ata_bus_probe(struct ata_port *ap)
2753 {
2754 unsigned int classes[ATA_MAX_DEVICES];
2755 int tries[ATA_MAX_DEVICES];
2756 int rc;
2757 struct ata_device *dev;
2758
2759 ata_for_each_dev(dev, &ap->link, ALL)
2760 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2761
2762 retry:
2763 ata_for_each_dev(dev, &ap->link, ALL) {
2764 /* If we issue an SRST then an ATA drive (not ATAPI)
2765 * may change configuration and be in PIO0 timing. If
2766 * we do a hard reset (or are coming from power on)
2767 * this is true for ATA or ATAPI. Until we've set a
2768 * suitable controller mode we should not touch the
2769 * bus as we may be talking too fast.
2770 */
2771 dev->pio_mode = XFER_PIO_0;
2772 dev->dma_mode = 0xff;
2773
2774 /* If the controller has a pio mode setup function
2775 * then use it to set the chipset to rights. Don't
2776 * touch the DMA setup as that will be dealt with when
2777 * configuring devices.
2778 */
2779 if (ap->ops->set_piomode)
2780 ap->ops->set_piomode(ap, dev);
2781 }
2782
2783 /* reset and determine device classes */
2784 ap->ops->phy_reset(ap);
2785
2786 ata_for_each_dev(dev, &ap->link, ALL) {
2787 if (dev->class != ATA_DEV_UNKNOWN)
2788 classes[dev->devno] = dev->class;
2789 else
2790 classes[dev->devno] = ATA_DEV_NONE;
2791
2792 dev->class = ATA_DEV_UNKNOWN;
2793 }
2794
2795 /* read IDENTIFY page and configure devices. We have to do the identify
2796 specific sequence bass-ackwards so that PDIAG- is released by
2797 the slave device */
2798
2799 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2800 if (tries[dev->devno])
2801 dev->class = classes[dev->devno];
2802
2803 if (!ata_dev_enabled(dev))
2804 continue;
2805
2806 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2807 dev->id);
2808 if (rc)
2809 goto fail;
2810 }
2811
2812 /* Now ask for the cable type as PDIAG- should have been released */
2813 if (ap->ops->cable_detect)
2814 ap->cbl = ap->ops->cable_detect(ap);
2815
2816 /* We may have SATA bridge glue hiding here irrespective of
2817 * the reported cable types and sensed types. When SATA
2818 * drives indicate we have a bridge, we don't know which end
2819 * of the link the bridge is on, which is a problem.
2820 */
2821 ata_for_each_dev(dev, &ap->link, ENABLED)
2822 if (ata_id_is_sata(dev->id))
2823 ap->cbl = ATA_CBL_SATA;
2824
2825 /* After the identify sequence we can now set up the devices. We do
2826 this in the normal order so that the user doesn't get confused */
2827
2828 ata_for_each_dev(dev, &ap->link, ENABLED) {
2829 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2830 rc = ata_dev_configure(dev);
2831 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2832 if (rc)
2833 goto fail;
2834 }
2835
2836 /* configure transfer mode */
2837 rc = ata_set_mode(&ap->link, &dev);
2838 if (rc)
2839 goto fail;
2840
2841 ata_for_each_dev(dev, &ap->link, ENABLED)
2842 return 0;
2843
2844 return -ENODEV;
2845
2846 fail:
2847 tries[dev->devno]--;
2848
2849 switch (rc) {
2850 case -EINVAL:
2851 /* eeek, something went very wrong, give up */
2852 tries[dev->devno] = 0;
2853 break;
2854
2855 case -ENODEV:
2856 /* give it just one more chance */
2857 tries[dev->devno] = min(tries[dev->devno], 1);
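		/* fall through -- apply the same slow-down handling as -EIO */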
2858 case -EIO:
2859 if (tries[dev->devno] == 1) {
2860 /* This is the last chance, better to slow
2861 * down than lose it.
2862 */
2863 sata_down_spd_limit(&ap->link, 0);
2864 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2865 }
2866 }
2867
2868 if (!tries[dev->devno])
2869 ata_dev_disable(dev);
2870
2871 goto retry;
2872 }
2873
2874 /**
2875 * sata_print_link_status - Print SATA link status
2876 * @link: SATA link to printk link status about
2877 *
2878 * This function prints link speed and status of a SATA link.
2879 *
2880 * LOCKING:
2881 * None.
2882 */
2883 static void sata_print_link_status(struct ata_link *link)
2884 {
2885 u32 sstatus, scontrol, tmp;
2886
2887 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2888 return;
2889 sata_scr_read(link, SCR_CONTROL, &scontrol);
2890
2891 if (ata_phys_link_online(link)) {
2892 tmp = (sstatus >> 4) & 0xf;
2893 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2894 sata_spd_string(tmp), sstatus, scontrol);
2895 } else {
2896 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2897 sstatus, scontrol);
2898 }
2899 }
2900
2901 /**
2902 * ata_dev_pair - return other device on cable
2903 * @adev: device
2904 *
2905 * Obtain the other device on the same cable; if none is
2906 * present, NULL is returned.
2907 */
2908
2909 struct ata_device *ata_dev_pair(struct ata_device *adev)
2910 {
2911 struct ata_link *link = adev->link;
2912 struct ata_device *pair = &link->device[1 - adev->devno];
2913 if (!ata_dev_enabled(pair))
2914 return NULL;
2915 return pair;
2916 }
2917
2918 /**
2919 * sata_down_spd_limit - adjust SATA spd limit downward
2920 * @link: Link to adjust SATA spd limit for
2921 * @spd_limit: Additional limit
2922 *
2923 * Adjust SATA spd limit of @link downward. Note that this
2924 * function only adjusts the limit. The change must be applied
2925 * using sata_set_spd().
2926 *
2927 * If @spd_limit is non-zero, the speed is limited to @spd_limit or
2928 * lower, if such a speed is supported. If
2929 * @spd_limit is slower than any supported speed, only the lowest
2930 * supported speed is allowed.
2931 *
2932 * LOCKING:
2933 * Inherited from caller.
2934 *
2935 * RETURNS:
2936 * 0 on success, negative errno on failure
2937 */
2938 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2939 {
2940 u32 sstatus, spd, mask;
2941 int rc, bit;
2942
2943 if (!sata_scr_valid(link))
2944 return -EOPNOTSUPP;
2945
2946 /* If SCR can be read, use it to determine the current SPD.
2947 * If not, use cached value in link->sata_spd.
2948 */
2949 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2950 if (rc == 0 && ata_sstatus_online(sstatus))
2951 spd = (sstatus >> 4) & 0xf;
2952 else
2953 spd = link->sata_spd;
2954
2955 mask = link->sata_spd_limit;
2956 if (mask <= 1)
2957 return -EINVAL;
2958
2959 /* unconditionally mask off the highest bit */
2960 bit = fls(mask) - 1;
2961 mask &= ~(1 << bit);
2962
2963 /* Mask off all speeds higher than or equal to the current
2964 * one. Force 1.5Gbps if current SPD is not available.
2965 */
2966 if (spd > 1)
2967 mask &= (1 << (spd - 1)) - 1;
2968 else
2969 mask &= 1;
2970
2971 /* were we already at the bottom? */
2972 if (!mask)
2973 return -EINVAL;
2974
2975 if (spd_limit) {
2976 if (mask & ((1 << spd_limit) - 1))
2977 mask &= (1 << spd_limit) - 1;
2978 else {
2979 bit = ffs(mask) - 1;
2980 mask = 1 << bit;
2981 }
2982 }
2983
2984 link->sata_spd_limit = mask;
2985
2986 ata_link_warn(link, "limiting SATA link speed to %s\n",
2987 sata_spd_string(fls(mask)));
2988
2989 return 0;
2990 }
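/*
 * Rough worked example: with every generation allowed (sata_spd_limit ==
 * 0x7), the link currently at 6.0 Gbps (spd == 3) and @spd_limit == 0,
 * the top bit is masked off and the mask is then clamped below the
 * current speed, leaving 0x3 -- i.e. "limiting SATA link speed to
 * 3.0 Gbps".
 */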
2991
2992 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2993 {
2994 struct ata_link *host_link = &link->ap->link;
2995 u32 limit, target, spd;
2996
2997 limit = link->sata_spd_limit;
2998
2999 /* Don't configure downstream link faster than upstream link.
3000 * It doesn't speed up anything and some PMPs choke on such
3001 * configuration.
3002 */
3003 if (!ata_is_host_link(link) && host_link->sata_spd)
3004 limit &= (1 << host_link->sata_spd) - 1;
3005
3006 if (limit == UINT_MAX)
3007 target = 0;
3008 else
3009 target = fls(limit);
3010
3011 spd = (*scontrol >> 4) & 0xf;
3012 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3013
3014 return spd != target;
3015 }
3016
3017 /**
3018 * sata_set_spd_needed - is SATA spd configuration needed
3019 * @link: Link in question
3020 *
3021 * Test whether the spd limit in SControl matches
3022 * @link->sata_spd_limit. This function is used to determine
3023 * whether hardreset is necessary to apply SATA spd
3024 * configuration.
3025 *
3026 * LOCKING:
3027 * Inherited from caller.
3028 *
3029 * RETURNS:
3030 * 1 if SATA spd configuration is needed, 0 otherwise.
3031 */
3032 static int sata_set_spd_needed(struct ata_link *link)
3033 {
3034 u32 scontrol;
3035
3036 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3037 return 1;
3038
3039 return __sata_set_spd_needed(link, &scontrol);
3040 }
3041
3042 /**
3043 * sata_set_spd - set SATA spd according to spd limit
3044 * @link: Link to set SATA spd for
3045 *
3046 * Set SATA spd of @link according to sata_spd_limit.
3047 *
3048 * LOCKING:
3049 * Inherited from caller.
3050 *
3051 * RETURNS:
3052 * 0 if spd doesn't need to be changed, 1 if spd has been
3053 * changed. Negative errno if SCR registers are inaccessible.
3054 */
3055 int sata_set_spd(struct ata_link *link)
3056 {
3057 u32 scontrol;
3058 int rc;
3059
3060 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3061 return rc;
3062
3063 if (!__sata_set_spd_needed(link, &scontrol))
3064 return 0;
3065
3066 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3067 return rc;
3068
3069 return 1;
3070 }
3071
3072 /*
3073 * This mode timing computation functionality is ported over from
3074 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3075 */
3076 /*
3077 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3078 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3079 * for UDMA6, which is currently supported only by Maxtor drives.
3080 *
3081 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3082 */
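/*
 * Each row below follows the field order of struct ata_timing:
 * { mode, setup, act8b, rec8b, cyc8b, active, recover, dmack_hold,
 *   cycle, udma }.
 */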
3083
3084 static const struct ata_timing ata_timing[] = {
3085 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
3086 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
3087 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
3088 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
3089 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
3090 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
3091 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
3092 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
3093
3094 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
3095 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
3096 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
3097
3098 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
3099 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
3100 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
3101 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
3102 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
3103
3104 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
3105 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
3106 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
3107 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
3108 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
3109 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
3110 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
3111 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
3112
3113 { 0xFF }
3114 };
3115
3116 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
3117 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
3118
3119 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3120 {
3121 q->setup = EZ(t->setup * 1000, T);
3122 q->act8b = EZ(t->act8b * 1000, T);
3123 q->rec8b = EZ(t->rec8b * 1000, T);
3124 q->cyc8b = EZ(t->cyc8b * 1000, T);
3125 q->active = EZ(t->active * 1000, T);
3126 q->recover = EZ(t->recover * 1000, T);
3127 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
3128 q->cycle = EZ(t->cycle * 1000, T);
3129 q->udma = EZ(t->udma * 1000, UT);
3130 }
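/*
 * Rough worked example (assuming the caller passes T and UT in the
 * picosecond-style units the PATA chipset drivers use, i.e. 10^9 divided
 * by the bus clock in kHz): for a 33 MHz clock T is about 30000, so a
 * PIO0 setup time of 70 ns quantizes to ENOUGH(70 * 1000, 30000) = 3
 * clock periods, while EZ() keeps zero-valued entries at zero.
 */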
3131
3132 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3133 struct ata_timing *m, unsigned int what)
3134 {
3135 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3136 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3137 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3138 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3139 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3140 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3141 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3142 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3143 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3144 }
3145
3146 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3147 {
3148 const struct ata_timing *t = ata_timing;
3149
3150 while (xfer_mode > t->mode)
3151 t++;
3152
3153 if (xfer_mode == t->mode)
3154 return t;
3155
3156 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3157 __func__, xfer_mode);
3158
3159 return NULL;
3160 }
3161
3162 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3163 struct ata_timing *t, int T, int UT)
3164 {
3165 const u16 *id = adev->id;
3166 const struct ata_timing *s;
3167 struct ata_timing p;
3168
3169 /*
3170 * Find the mode.
3171 */
3172
3173 if (!(s = ata_timing_find_mode(speed)))
3174 return -EINVAL;
3175
3176 memcpy(t, s, sizeof(*s));
3177
3178 /*
3179 * If the drive is an EIDE drive, it can tell us it needs extended
3180 * PIO/MW_DMA cycle timing.
3181 */
3182
3183 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3184 memset(&p, 0, sizeof(p));
3185
3186 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3187 if (speed <= XFER_PIO_2)
3188 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3189 else if ((speed <= XFER_PIO_4) ||
3190 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3191 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3192 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3193 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3194
3195 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3196 }
3197
3198 /*
3199 * Convert the timing to bus clock counts.
3200 */
3201
3202 ata_timing_quantize(t, t, T, UT);
3203
3204 /*
3205 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3206 * S.M.A.R.T. and some other commands. We have to ensure that the
3207 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3208 */
3209
3210 if (speed > XFER_PIO_6) {
3211 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3212 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3213 }
3214
3215 /*
3216 * Lengthen active & recovery time so that cycle time is correct.
3217 */
3218
3219 if (t->act8b + t->rec8b < t->cyc8b) {
3220 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3221 t->rec8b = t->cyc8b - t->act8b;
3222 }
3223
3224 if (t->active + t->recover < t->cycle) {
3225 t->active += (t->cycle - (t->active + t->recover)) / 2;
3226 t->recover = t->cycle - t->active;
3227 }
3228
3229 /* In a few cases quantisation may produce enough errors to
3230 leave t->cycle too low for the sum of active and recovery.
3231 If so, we must correct this. */
3232 if (t->active + t->recover > t->cycle)
3233 t->cycle = t->active + t->recover;
3234
3235 return 0;
3236 }
3237
3238 /**
3239 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3240 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3241 * @cycle: cycle duration in ns
3242 *
3243 * Return matching xfer mode for @cycle. The returned mode is of
3244 * the transfer type specified by @xfer_shift. If @cycle is too
3245 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3246 * than the fastest known mode, the fastest mode is returned.
3247 *
3248 * LOCKING:
3249 * None.
3250 *
3251 * RETURNS:
3252 * Matching xfer_mode, 0xff if no match found.
3253 */
3254 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3255 {
3256 u8 base_mode = 0xff, last_mode = 0xff;
3257 const struct ata_xfer_ent *ent;
3258 const struct ata_timing *t;
3259
3260 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3261 if (ent->shift == xfer_shift)
3262 base_mode = ent->base;
3263
3264 for (t = ata_timing_find_mode(base_mode);
3265 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3266 unsigned short this_cycle;
3267
3268 switch (xfer_shift) {
3269 case ATA_SHIFT_PIO:
3270 case ATA_SHIFT_MWDMA:
3271 this_cycle = t->cycle;
3272 break;
3273 case ATA_SHIFT_UDMA:
3274 this_cycle = t->udma;
3275 break;
3276 default:
3277 return 0xff;
3278 }
3279
3280 if (cycle > this_cycle)
3281 break;
3282
3283 last_mode = t->mode;
3284 }
3285
3286 return last_mode;
3287 }
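/*
 * For example, ata_timing_cycle2mode(ATA_SHIFT_UDMA, 120) matches the
 * 120 ns UDMA row and returns XFER_UDMA_0, a 10 ns request returns the
 * fastest UDMA mode in the table, and a 200 ns request returns 0xff.
 */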
3288
3289 /**
3290 * ata_down_xfermask_limit - adjust dev xfer masks downward
3291 * @dev: Device to adjust xfer masks
3292 * @sel: ATA_DNXFER_* selector
3293 *
3294 * Adjust xfer masks of @dev downward. Note that this function
3295 * does not apply the change. Invoking ata_set_mode() afterwards
3296 * will apply the limit.
3297 *
3298 * LOCKING:
3299 * Inherited from caller.
3300 *
3301 * RETURNS:
3302 * 0 on success, negative errno on failure
3303 */
3304 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3305 {
3306 char buf[32];
3307 unsigned long orig_mask, xfer_mask;
3308 unsigned long pio_mask, mwdma_mask, udma_mask;
3309 int quiet, highbit;
3310
3311 quiet = !!(sel & ATA_DNXFER_QUIET);
3312 sel &= ~ATA_DNXFER_QUIET;
3313
3314 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3315 dev->mwdma_mask,
3316 dev->udma_mask);
3317 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3318
3319 switch (sel) {
3320 case ATA_DNXFER_PIO:
3321 highbit = fls(pio_mask) - 1;
3322 pio_mask &= ~(1 << highbit);
3323 break;
3324
3325 case ATA_DNXFER_DMA:
3326 if (udma_mask) {
3327 highbit = fls(udma_mask) - 1;
3328 udma_mask &= ~(1 << highbit);
3329 if (!udma_mask)
3330 return -ENOENT;
3331 } else if (mwdma_mask) {
3332 highbit = fls(mwdma_mask) - 1;
3333 mwdma_mask &= ~(1 << highbit);
3334 if (!mwdma_mask)
3335 return -ENOENT;
3336 }
3337 break;
3338
3339 case ATA_DNXFER_40C:
3340 udma_mask &= ATA_UDMA_MASK_40C;
3341 break;
3342
3343 case ATA_DNXFER_FORCE_PIO0:
3344 pio_mask &= 1;
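		/* fall through -- FORCE_PIO0 also clears the DMA masks below */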
3345 case ATA_DNXFER_FORCE_PIO:
3346 mwdma_mask = 0;
3347 udma_mask = 0;
3348 break;
3349
3350 default:
3351 BUG();
3352 }
3353
3354 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3355
3356 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3357 return -ENOENT;
3358
3359 if (!quiet) {
3360 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3361 snprintf(buf, sizeof(buf), "%s:%s",
3362 ata_mode_string(xfer_mask),
3363 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3364 else
3365 snprintf(buf, sizeof(buf), "%s",
3366 ata_mode_string(xfer_mask));
3367
3368 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3369 }
3370
3371 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3372 &dev->udma_mask);
3373
3374 return 0;
3375 }
3376
3377 static int ata_dev_set_mode(struct ata_device *dev)
3378 {
3379 struct ata_port *ap = dev->link->ap;
3380 struct ata_eh_context *ehc = &dev->link->eh_context;
3381 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3382 const char *dev_err_whine = "";
3383 int ign_dev_err = 0;
3384 unsigned int err_mask = 0;
3385 int rc;
3386
3387 dev->flags &= ~ATA_DFLAG_PIO;
3388 if (dev->xfer_shift == ATA_SHIFT_PIO)
3389 dev->flags |= ATA_DFLAG_PIO;
3390
3391 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3392 dev_err_whine = " (SET_XFERMODE skipped)";
3393 else {
3394 if (nosetxfer)
3395 ata_dev_warn(dev,
3396 "NOSETXFER but PATA detected - can't "
3397 "skip SETXFER, might malfunction\n");
3398 err_mask = ata_dev_set_xfermode(dev);
3399 }
3400
3401 if (err_mask & ~AC_ERR_DEV)
3402 goto fail;
3403
3404 /* revalidate */
3405 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3406 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3407 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3408 if (rc)
3409 return rc;
3410
3411 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3412 /* Old CFA may refuse this command, which is just fine */
3413 if (ata_id_is_cfa(dev->id))
3414 ign_dev_err = 1;
3415 /* Catch several broken garbage emulations plus some pre-ATA
3416 devices */
3417 if (ata_id_major_version(dev->id) == 0 &&
3418 dev->pio_mode <= XFER_PIO_2)
3419 ign_dev_err = 1;
3420 /* Some very old devices and some bad newer ones fail
3421 any kind of SET_XFERMODE request but support PIO0-2
3422 timings and no IORDY */
3423 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3424 ign_dev_err = 1;
3425 }
3426 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3427 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3428 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3429 dev->dma_mode == XFER_MW_DMA_0 &&
3430 (dev->id[63] >> 8) & 1)
3431 ign_dev_err = 1;
3432
3433 /* if the device is actually configured correctly, ignore dev err */
3434 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3435 ign_dev_err = 1;
3436
3437 if (err_mask & AC_ERR_DEV) {
3438 if (!ign_dev_err)
3439 goto fail;
3440 else
3441 dev_err_whine = " (device error ignored)";
3442 }
3443
3444 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3445 dev->xfer_shift, (int)dev->xfer_mode);
3446
3447 ata_dev_info(dev, "configured for %s%s\n",
3448 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3449 dev_err_whine);
3450
3451 return 0;
3452
3453 fail:
3454 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3455 return -EIO;
3456 }
3457
3458 /**
3459 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3460 * @link: link on which timings will be programmed
3461 * @r_failed_dev: out parameter for failed device
3462 *
3463 * Standard implementation of the function used to tune and set
3464 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3465 * ata_dev_set_mode() fails, pointer to the failing device is
3466 * returned in @r_failed_dev.
3467 *
3468 * LOCKING:
3469 * PCI/etc. bus probe sem.
3470 *
3471 * RETURNS:
3472 * 0 on success, negative errno otherwise
3473 */
3474
3475 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3476 {
3477 struct ata_port *ap = link->ap;
3478 struct ata_device *dev;
3479 int rc = 0, used_dma = 0, found = 0;
3480
3481 /* step 1: calculate xfer_mask */
3482 ata_for_each_dev(dev, link, ENABLED) {
3483 unsigned long pio_mask, dma_mask;
3484 unsigned int mode_mask;
3485
3486 mode_mask = ATA_DMA_MASK_ATA;
3487 if (dev->class == ATA_DEV_ATAPI)
3488 mode_mask = ATA_DMA_MASK_ATAPI;
3489 else if (ata_id_is_cfa(dev->id))
3490 mode_mask = ATA_DMA_MASK_CFA;
3491
3492 ata_dev_xfermask(dev);
3493 ata_force_xfermask(dev);
3494
3495 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3496
3497 if (libata_dma_mask & mode_mask)
3498 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3499 dev->udma_mask);
3500 else
3501 dma_mask = 0;
3502
3503 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3504 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3505
3506 found = 1;
3507 if (ata_dma_enabled(dev))
3508 used_dma = 1;
3509 }
3510 if (!found)
3511 goto out;
3512
3513 /* step 2: always set host PIO timings */
3514 ata_for_each_dev(dev, link, ENABLED) {
3515 if (dev->pio_mode == 0xff) {
3516 ata_dev_warn(dev, "no PIO support\n");
3517 rc = -EINVAL;
3518 goto out;
3519 }
3520
3521 dev->xfer_mode = dev->pio_mode;
3522 dev->xfer_shift = ATA_SHIFT_PIO;
3523 if (ap->ops->set_piomode)
3524 ap->ops->set_piomode(ap, dev);
3525 }
3526
3527 /* step 3: set host DMA timings */
3528 ata_for_each_dev(dev, link, ENABLED) {
3529 if (!ata_dma_enabled(dev))
3530 continue;
3531
3532 dev->xfer_mode = dev->dma_mode;
3533 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3534 if (ap->ops->set_dmamode)
3535 ap->ops->set_dmamode(ap, dev);
3536 }
3537
3538 /* step 4: update devices' xfer mode */
3539 ata_for_each_dev(dev, link, ENABLED) {
3540 rc = ata_dev_set_mode(dev);
3541 if (rc)
3542 goto out;
3543 }
3544
3545 /* Record simplex status. If we selected DMA then the other
3546 * host channels are not permitted to do so.
3547 */
3548 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3549 ap->host->simplex_claimed = ap;
3550
3551 out:
3552 if (rc)
3553 *r_failed_dev = dev;
3554 return rc;
3555 }
3556
3557 /**
3558 * ata_wait_ready - wait for link to become ready
3559 * @link: link to be waited on
3560 * @deadline: deadline jiffies for the operation
3561 * @check_ready: callback to check link readiness
3562 *
3563 * Wait for @link to become ready. @check_ready should return
3564 * a positive number if @link is ready, 0 if it isn't, -ENODEV if
3565 * link doesn't seem to be occupied, other errno for other error
3566 * conditions.
3567 *
3568 * Transient -ENODEV conditions are allowed for
3569 * ATA_TMOUT_FF_WAIT.
3570 *
3571 * LOCKING:
3572 * EH context.
3573 *
3574 * RETURNS:
3575 * 0 if @link is ready before @deadline; otherwise, -errno.
3576 */
3577 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3578 int (*check_ready)(struct ata_link *link))
3579 {
3580 unsigned long start = jiffies;
3581 unsigned long nodev_deadline;
3582 int warned = 0;
3583
3584 /* choose which 0xff timeout to use, read comment in libata.h */
3585 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3586 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3587 else
3588 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3589
3590 /* Slave readiness can't be tested separately from master. On
3591 * M/S emulation configuration, this function should be called
3592 * only on the master and it will handle both master and slave.
3593 */
3594 WARN_ON(link == link->ap->slave_link);
3595
3596 if (time_after(nodev_deadline, deadline))
3597 nodev_deadline = deadline;
3598
3599 while (1) {
3600 unsigned long now = jiffies;
3601 int ready, tmp;
3602
3603 ready = tmp = check_ready(link);
3604 if (ready > 0)
3605 return 0;
3606
3607 /*
3608 * -ENODEV could be transient. Ignore -ENODEV if link
3609 * is online. Also, some SATA devices take a long
3610 * time to clear 0xff after reset. Wait for
3611 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3612 * offline.
3613 *
3614 * Note that some PATA controllers (pata_ali) explode
3615 * if status register is read more than once when
3616 * there's no device attached.
3617 */
3618 if (ready == -ENODEV) {
3619 if (ata_link_online(link))
3620 ready = 0;
3621 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3622 !ata_link_offline(link) &&
3623 time_before(now, nodev_deadline))
3624 ready = 0;
3625 }
3626
3627 if (ready)
3628 return ready;
3629 if (time_after(now, deadline))
3630 return -EBUSY;
3631
3632 if (!warned && time_after(now, start + 5 * HZ) &&
3633 (deadline - now > 3 * HZ)) {
3634 ata_link_warn(link,
3635 "link is slow to respond, please be patient "
3636 "(ready=%d)\n", tmp);
3637 warned = 1;
3638 }
3639
3640 ata_msleep(link->ap, 50);
3641 }
3642 }
3643
3644 /**
3645 * ata_wait_after_reset - wait for link to become ready after reset
3646 * @link: link to be waited on
3647 * @deadline: deadline jiffies for the operation
3648 * @check_ready: callback to check link readiness
3649 *
3650 * Wait for @link to become ready after reset.
3651 *
3652 * LOCKING:
3653 * EH context.
3654 *
3655 * RETURNS:
3656 * 0 if @link is ready before @deadline; otherwise, -errno.
3657 */
3658 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3659 int (*check_ready)(struct ata_link *link))
3660 {
3661 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3662
3663 return ata_wait_ready(link, deadline, check_ready);
3664 }
3665
3666 /**
3667 * sata_link_debounce - debounce SATA phy status
3668 * @link: ATA link to debounce SATA phy status for
3669 * @params: timing parameters { interval, duration, timeout } in msec
3670 * @deadline: deadline jiffies for the operation
3671 *
3672 * Make sure SStatus of @link reaches stable state, determined by
3673 * holding the same value where DET is not 1 for @duration polled
3674 * every @interval, before @timeout. The timeout constrains the
3675 * beginning of the stable state. Because DET gets stuck at 1 on
3676 * some controllers after hot unplugging, this function waits
3677 * until the timeout and then returns 0 if DET is stable at 1.
3678 *
3679 * @timeout is further limited by @deadline. The sooner of the
3680 * two is used.
3681 *
3682 * LOCKING:
3683 * Kernel thread context (may sleep)
3684 *
3685 * RETURNS:
3686 * 0 on success, -errno on failure.
3687 */
3688 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3689 unsigned long deadline)
3690 {
3691 unsigned long interval = params[0];
3692 unsigned long duration = params[1];
3693 unsigned long last_jiffies, t;
3694 u32 last, cur;
3695 int rc;
3696
3697 t = ata_deadline(jiffies, params[2]);
3698 if (time_before(t, deadline))
3699 deadline = t;
3700
3701 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3702 return rc;
3703 cur &= 0xf;
3704
3705 last = cur;
3706 last_jiffies = jiffies;
3707
3708 while (1) {
3709 ata_msleep(link->ap, interval);
3710 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3711 return rc;
3712 cur &= 0xf;
3713
3714 /* DET stable? */
3715 if (cur == last) {
3716 if (cur == 1 && time_before(jiffies, deadline))
3717 continue;
3718 if (time_after(jiffies,
3719 ata_deadline(last_jiffies, duration)))
3720 return 0;
3721 continue;
3722 }
3723
3724 /* unstable, start over */
3725 last = cur;
3726 last_jiffies = jiffies;
3727
3728 /* Check deadline. If debouncing failed, return
3729 * -EPIPE to tell upper layer to lower link speed.
3730 */
3731 if (time_after(jiffies, deadline))
3732 return -EPIPE;
3733 }
3734 }
3735
3736 /**
3737 * sata_link_resume - resume SATA link
3738 * @link: ATA link to resume SATA
3739 * @params: timing parameters { interval, duration, timeout } in msec
3740 * @deadline: deadline jiffies for the operation
3741 *
3742 * Resume SATA phy @link and debounce it.
3743 *
3744 * LOCKING:
3745 * Kernel thread context (may sleep)
3746 *
3747 * RETURNS:
3748 * 0 on success, -errno on failure.
3749 */
3750 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3751 unsigned long deadline)
3752 {
3753 int tries = ATA_LINK_RESUME_TRIES;
3754 u32 scontrol, serror;
3755 int rc;
3756
3757 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3758 return rc;
3759
3760 /*
3761 * Writes to SControl sometimes get ignored under certain
3762 * controllers (ata_piix SIDPR). Make sure DET actually is
3763 * cleared.
3764 */
3765 do {
3766 scontrol = (scontrol & 0x0f0) | 0x300;
3767 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3768 return rc;
3769 /*
3770 * Some PHYs react badly if SStatus is pounded
3771 * immediately after resuming. Delay 200ms before
3772 * debouncing.
3773 */
3774 if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
3775 ata_msleep(link->ap, 200);
3776
3777 /* is SControl restored correctly? */
3778 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3779 return rc;
3780 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3781
3782 if ((scontrol & 0xf0f) != 0x300) {
3783 ata_link_warn(link, "failed to resume link (SControl %X)\n",
3784 scontrol);
3785 return 0;
3786 }
3787
3788 if (tries < ATA_LINK_RESUME_TRIES)
3789 ata_link_warn(link, "link resume succeeded after %d retries\n",
3790 ATA_LINK_RESUME_TRIES - tries);
3791
3792 if ((rc = sata_link_debounce(link, params, deadline)))
3793 return rc;
3794
3795 /* clear SError, some PHYs require this even for SRST to work */
3796 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3797 rc = sata_scr_write(link, SCR_ERROR, serror);
3798
3799 return rc != -EINVAL ? rc : 0;
3800 }
3801
3802 /**
3803 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3804 * @link: ATA link to manipulate SControl for
3805 * @policy: LPM policy to configure
3806 * @spm_wakeup: initiate LPM transition to active state
3807 *
3808 * Manipulate the IPM field of the SControl register of @link
3809 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3810 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3811 * the link. This function also clears PHYRDY_CHG before
3812 * returning.
3813 *
3814 * LOCKING:
3815 * EH context.
3816 *
3817 * RETURNS:
3818 * 0 on success, -errno otherwise.
3819 */
3820 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3821 bool spm_wakeup)
3822 {
3823 struct ata_eh_context *ehc = &link->eh_context;
3824 bool woken_up = false;
3825 u32 scontrol;
3826 int rc;
3827
3828 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3829 if (rc)
3830 return rc;
3831
3832 switch (policy) {
3833 case ATA_LPM_MAX_POWER:
3834 /* disable all LPM transitions */
3835 scontrol |= (0x7 << 8);
3836 /* initiate transition to active state */
3837 if (spm_wakeup) {
3838 scontrol |= (0x4 << 12);
3839 woken_up = true;
3840 }
3841 break;
3842 case ATA_LPM_MED_POWER:
3843 /* allow LPM to PARTIAL */
3844 scontrol &= ~(0x1 << 8);
3845 scontrol |= (0x6 << 8);
3846 break;
3847 case ATA_LPM_MIN_POWER:
3848 if (ata_link_nr_enabled(link) > 0)
3849 /* no restrictions on LPM transitions */
3850 scontrol &= ~(0x7 << 8);
3851 else {
3852 /* empty port, power off */
3853 scontrol &= ~0xf;
3854 scontrol |= (0x1 << 2);
3855 }
3856 break;
3857 default:
3858 WARN_ON(1);
3859 }
3860
3861 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3862 if (rc)
3863 return rc;
3864
3865 /* give the link time to transit out of LPM state */
3866 if (woken_up)
3867 msleep(10);
3868
3869 /* clear PHYRDY_CHG from SError */
3870 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3871 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3872 }
3873
3874 /**
3875 * ata_std_prereset - prepare for reset
3876 * @link: ATA link to be reset
3877 * @deadline: deadline jiffies for the operation
3878 *
3879 * @link is about to be reset. Initialize it. Failure from
3880 * prereset makes libata abort the whole reset sequence and give up
3881 * that port, so prereset should be best-effort. It does its
3882 * best to prepare for reset sequence but if things go wrong, it
3883 * should just whine, not fail.
3884 *
3885 * LOCKING:
3886 * Kernel thread context (may sleep)
3887 *
3888 * RETURNS:
3889 * 0 on success, -errno otherwise.
3890 */
3891 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3892 {
3893 struct ata_port *ap = link->ap;
3894 struct ata_eh_context *ehc = &link->eh_context;
3895 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3896 int rc;
3897
3898 /* if we're about to do hardreset, nothing more to do */
3899 if (ehc->i.action & ATA_EH_HARDRESET)
3900 return 0;
3901
3902 /* if SATA, resume link */
3903 if (ap->flags & ATA_FLAG_SATA) {
3904 rc = sata_link_resume(link, timing, deadline);
3905 /* whine about phy resume failure but proceed */
3906 if (rc && rc != -EOPNOTSUPP)
3907 ata_link_warn(link,
3908 "failed to resume link for reset (errno=%d)\n",
3909 rc);
3910 }
3911
3912 /* no point in trying softreset on offline link */
3913 if (ata_phys_link_offline(link))
3914 ehc->i.action &= ~ATA_EH_SOFTRESET;
3915
3916 return 0;
3917 }
3918
3919 /**
3920 * sata_link_hardreset - reset link via SATA phy reset
3921 * @link: link to reset
3922 * @timing: timing parameters { interval, duration, timeout } in msec
3923 * @deadline: deadline jiffies for the operation
3924 * @online: optional out parameter indicating link onlineness
3925 * @check_ready: optional callback to check link readiness
3926 *
3927 * SATA phy-reset @link using DET bits of SControl register.
3928 * After hardreset, link readiness is waited upon using
3929 * ata_wait_ready() if @check_ready is specified. LLDs are
3930 * allowed to omit @check_ready and do the waiting themselves after
3931 * this function returns. Device classification is the LLD's
3932 * responsibility.
3933 *
3934 * *@online is set to one iff reset succeeded and @link is online
3935 * after reset.
3936 *
3937 * LOCKING:
3938 * Kernel thread context (may sleep)
3939 *
3940 * RETURNS:
3941 * 0 on success, -errno otherwise.
3942 */
3943 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3944 unsigned long deadline,
3945 bool *online, int (*check_ready)(struct ata_link *))
3946 {
3947 u32 scontrol;
3948 int rc;
3949
3950 DPRINTK("ENTER\n");
3951
3952 if (online)
3953 *online = false;
3954
3955 if (sata_set_spd_needed(link)) {
3956 /* SATA spec says nothing about how to reconfigure
3957 * spd. To be on the safe side, turn off phy during
3958 * reconfiguration. This works for at least ICH7 AHCI
3959 * and Sil3124.
3960 */
3961 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3962 goto out;
3963
3964 scontrol = (scontrol & 0x0f0) | 0x304;
3965
3966 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3967 goto out;
3968
3969 sata_set_spd(link);
3970 }
3971
3972 /* issue phy wake/reset */
3973 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3974 goto out;
3975
3976 scontrol = (scontrol & 0x0f0) | 0x301;
3977
3978 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3979 goto out;
3980
3981 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3982 * 10.4.2 says at least 1 ms.
3983 */
3984 ata_msleep(link->ap, 1);
3985
3986 /* bring link back */
3987 rc = sata_link_resume(link, timing, deadline);
3988 if (rc)
3989 goto out;
3990 /* if link is offline nothing more to do */
3991 if (ata_phys_link_offline(link))
3992 goto out;
3993
3994 /* Link is online. From this point, -ENODEV too is an error. */
3995 if (online)
3996 *online = true;
3997
3998 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3999 /* If PMP is supported, we have to do follow-up SRST.
4000 * Some PMPs don't send D2H Reg FIS after hardreset if
4001 * the first port is empty. Wait only for
4002 * ATA_TMOUT_PMP_SRST_WAIT.
4003 */
4004 if (check_ready) {
4005 unsigned long pmp_deadline;
4006
4007 pmp_deadline = ata_deadline(jiffies,
4008 ATA_TMOUT_PMP_SRST_WAIT);
4009 if (time_after(pmp_deadline, deadline))
4010 pmp_deadline = deadline;
4011 ata_wait_ready(link, pmp_deadline, check_ready);
4012 }
4013 rc = -EAGAIN;
4014 goto out;
4015 }
4016
4017 rc = 0;
4018 if (check_ready)
4019 rc = ata_wait_ready(link, deadline, check_ready);
4020 out:
4021 if (rc && rc != -EAGAIN) {
4022 /* online is set iff link is online && reset succeeded */
4023 if (online)
4024 *online = false;
4025 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
4026 }
4027 DPRINTK("EXIT, rc=%d\n", rc);
4028 return rc;
4029 }
4030
4031 /**
4032 * sata_std_hardreset - COMRESET w/o waiting or classification
4033 * @link: link to reset
4034 * @class: resulting class of attached device
4035 * @deadline: deadline jiffies for the operation
4036 *
4037 * Standard SATA COMRESET w/o waiting or classification.
4038 *
4039 * LOCKING:
4040 * Kernel thread context (may sleep)
4041 *
4042 * RETURNS:
4043 * 0 if link offline, -EAGAIN if link online, -errno on errors.
4044 */
4045 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4046 unsigned long deadline)
4047 {
4048 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4049 bool online;
4050 int rc;
4051
4052 /* do hardreset */
4053 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4054 return online ? -EAGAIN : rc;
4055 }
4056
4057 /**
4058 * ata_std_postreset - standard postreset callback
4059 * @link: the target ata_link
4060 * @classes: classes of attached devices
4061 *
4062 * This function is invoked after a successful reset. Note that
4063 * the device might have been reset more than once using
4064 * different reset methods before postreset is invoked.
4065 *
4066 * LOCKING:
4067 * Kernel thread context (may sleep)
4068 */
4069 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4070 {
4071 u32 serror;
4072
4073 DPRINTK("ENTER\n");
4074
4075 /* reset complete, clear SError */
4076 if (!sata_scr_read(link, SCR_ERROR, &serror))
4077 sata_scr_write(link, SCR_ERROR, serror);
4078
4079 /* print link status */
4080 sata_print_link_status(link);
4081
4082 DPRINTK("EXIT\n");
4083 }
4084
4085 /**
4086 * ata_dev_same_device - Determine whether new ID matches configured device
4087 * @dev: device to compare against
4088 * @new_class: class of the new device
4089 * @new_id: IDENTIFY page of the new device
4090 *
4091 * Compare @new_class and @new_id against @dev and determine
4092 * whether @dev is the device indicated by @new_class and
4093 * @new_id.
4094 *
4095 * LOCKING:
4096 * None.
4097 *
4098 * RETURNS:
4099 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4100 */
4101 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4102 const u16 *new_id)
4103 {
4104 const u16 *old_id = dev->id;
4105 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4106 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4107
4108 if (dev->class != new_class) {
4109 ata_dev_info(dev, "class mismatch %d != %d\n",
4110 dev->class, new_class);
4111 return 0;
4112 }
4113
4114 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4115 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4116 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4117 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4118
4119 if (strcmp(model[0], model[1])) {
4120 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4121 model[0], model[1]);
4122 return 0;
4123 }
4124
4125 if (strcmp(serial[0], serial[1])) {
4126 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4127 serial[0], serial[1]);
4128 return 0;
4129 }
4130
4131 return 1;
4132 }
4133
4134 /**
4135 * ata_dev_reread_id - Re-read IDENTIFY data
4136 * @dev: target ATA device
4137 * @readid_flags: read ID flags
4138 *
4139 * Re-read IDENTIFY page and make sure @dev is still attached to
4140 * the port.
4141 *
4142 * LOCKING:
4143 * Kernel thread context (may sleep)
4144 *
4145 * RETURNS:
4146 * 0 on success, negative errno otherwise
4147 */
4148 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4149 {
4150 unsigned int class = dev->class;
4151 u16 *id = (void *)dev->link->ap->sector_buf;
4152 int rc;
4153
4154 /* read ID data */
4155 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4156 if (rc)
4157 return rc;
4158
4159 /* is the device still there? */
4160 if (!ata_dev_same_device(dev, class, id))
4161 return -ENODEV;
4162
4163 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4164 return 0;
4165 }
4166
4167 /**
4168 * ata_dev_revalidate - Revalidate ATA device
4169 * @dev: device to revalidate
4170 * @new_class: new class code
4171 * @readid_flags: read ID flags
4172 *
4173 * Re-read IDENTIFY page, make sure @dev is still attached to the
4174 * port and reconfigure it according to the new IDENTIFY page.
4175 *
4176 * LOCKING:
4177 * Kernel thread context (may sleep)
4178 *
4179 * RETURNS:
4180 * 0 on success, negative errno otherwise
4181 */
4182 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4183 unsigned int readid_flags)
4184 {
4185 u64 n_sectors = dev->n_sectors;
4186 u64 n_native_sectors = dev->n_native_sectors;
4187 int rc;
4188
4189 if (!ata_dev_enabled(dev))
4190 return -ENODEV;
4191
4192 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4193 if (ata_class_enabled(new_class) &&
4194 new_class != ATA_DEV_ATA &&
4195 new_class != ATA_DEV_ATAPI &&
4196 new_class != ATA_DEV_ZAC &&
4197 new_class != ATA_DEV_SEMB) {
4198 ata_dev_info(dev, "class mismatch %u != %u\n",
4199 dev->class, new_class);
4200 rc = -ENODEV;
4201 goto fail;
4202 }
4203
4204 /* re-read ID */
4205 rc = ata_dev_reread_id(dev, readid_flags);
4206 if (rc)
4207 goto fail;
4208
4209 /* configure device according to the new ID */
4210 rc = ata_dev_configure(dev);
4211 if (rc)
4212 goto fail;
4213
4214 /* verify n_sectors hasn't changed */
4215 if (dev->class != ATA_DEV_ATA || !n_sectors ||
4216 dev->n_sectors == n_sectors)
4217 return 0;
4218
4219 /* n_sectors has changed */
4220 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4221 (unsigned long long)n_sectors,
4222 (unsigned long long)dev->n_sectors);
4223
4224 /*
4225 * Something could have caused HPA to be unlocked
4226 * involuntarily. If n_native_sectors hasn't changed and the
4227 * new size matches it, keep the device.
4228 */
4229 if (dev->n_native_sectors == n_native_sectors &&
4230 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4231 ata_dev_warn(dev,
4232 "new n_sectors matches native, probably "
4233 "late HPA unlock, n_sectors updated\n");
4234 /* use the larger n_sectors */
4235 return 0;
4236 }
4237
4238 /*
4239 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4240 * unlocking HPA in those cases.
4241 *
4242 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4243 */
4244 if (dev->n_native_sectors == n_native_sectors &&
4245 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4246 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4247 ata_dev_warn(dev,
4248 "old n_sectors matches native, probably "
4249 "late HPA lock, will try to unlock HPA\n");
4250 /* try unlocking HPA */
4251 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4252 rc = -EIO;
4253 } else
4254 rc = -ENODEV;
4255
4256 /* restore original n_[native_]sectors and fail */
4257 dev->n_native_sectors = n_native_sectors;
4258 dev->n_sectors = n_sectors;
4259 fail:
4260 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4261 return rc;
4262 }
4263
4264 struct ata_blacklist_entry {
4265 const char *model_num;
4266 const char *model_rev;
4267 unsigned long horkage;
4268 };
4269
4270 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4271 /* Devices with DMA related problems under Linux */
4272 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4273 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4274 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4275 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4276 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4277 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4278 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4279 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4280 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4281 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4282 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4283 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4284 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4285 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4286 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4287 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4288 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4289 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4290 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4291 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4292 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4293 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4294 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4295 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4296 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4297 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4298 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4299 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4300 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4301 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
4302 /* Odd clown on sil3726/4726 PMPs */
4303 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4304
4305 /* Weird ATAPI devices */
4306 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4307 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4308 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4309 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4310
4311 /*
4312 * Causes silent data corruption with higher max sects.
4313 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4314 */
4315 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4316
4317 /*
4318 * Device times out with higher max sects.
4319 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4320 */
4321 { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4322
4323 /* Devices we expect to fail diagnostics */
4324
4325 /* Devices where NCQ should be avoided */
4326 /* NCQ is slow */
4327 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4328 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4329 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4330 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4331 /* NCQ is broken */
4332 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4333 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4334 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4335 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4336 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4337
4338 /* Seagate NCQ + FLUSH CACHE firmware bug */
4339 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4340 ATA_HORKAGE_FIRMWARE_WARN },
4341
4342 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4343 ATA_HORKAGE_FIRMWARE_WARN },
4344
4345 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4346 ATA_HORKAGE_FIRMWARE_WARN },
4347
4348 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4349 ATA_HORKAGE_FIRMWARE_WARN },
4350
4351 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4352 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4353 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4354 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
4355
4356 /* Blacklist entries taken from Silicon Image 3124/3132
4357 Windows driver .inf file - also several Linux problem reports */
4358 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4359 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4360 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4361
4362 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4363 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4364
4365 /* devices which puke on READ_NATIVE_MAX */
4366 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4367 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4368 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4369 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4370
4371 /* this one allows HPA unlocking but fails IOs on the area */
4372 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4373
4374 /* Devices which report 1 sector over size HPA */
4375 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4376 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4377 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4378
4379 /* Devices which get the IVB wrong */
4380 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4381 /* Maybe we should just blacklist TSSTcorp... */
4382 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4383
4384 /* Devices that do not need bridging limits applied */
4385 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4386 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4387
4388 /* Devices which aren't very happy with higher link speeds */
4389 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4390 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
4391
4392 /*
4393 * Devices which choke on SETXFER. Applies only if both the
4394 * device and controller are SATA.
4395 */
4396 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4397 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4398 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
4399 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4400 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4401
4402 /* devices that don't properly handle queued TRIM commands */
4403 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4404 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4405 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4406 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4407 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4408 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4409 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4410 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4411 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4412 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4413 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4414 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4415 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4416 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4417
4418 /* devices that don't properly handle TRIM commands */
4419 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4420
4421 /*
4422 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4423 * (Return Zero After Trim) flags in the ATA Command Set are
4424 * unreliable in the sense that they only define what happens if
4425 * the device successfully executed the DSM TRIM command. TRIM
4426 * is only advisory, however, and the device is free to silently
4427 * ignore all or parts of the request.
4428 *
4429 * Whitelist drives that are known to reliably return zeroes
4430 * after TRIM.
4431 */
4432
4433 /*
4434 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4435 * that model before whitelisting all other intel SSDs.
4436 */
4437 { "INTEL*SSDSC2MH*", NULL, 0, },
4438
4439 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4440 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4441 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4442 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4443 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4444 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4445 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4446
4447 /*
4448 * Some WD SATA-I drives spin up and down erratically when the link
4449 * is put into the slumber mode. We don't have a full list of the
4450 * affected devices. Disable LPM if the device matches one of the
4451 * known prefixes and is SATA-1. As a side effect, LPM partial is
4452 * lost too.
4453 *
4454 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4455 */
4456 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4457 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4458 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4459 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4460 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4461 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4462 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4463
4464 /* End Marker */
4465 { }
4466 };
4467
4468 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4469 {
4470 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4471 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4472 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4473
4474 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4475 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4476
4477 while (ad->model_num) {
4478 if (glob_match(ad->model_num, model_num)) {
4479 if (ad->model_rev == NULL)
4480 return ad->horkage;
4481 if (glob_match(ad->model_rev, model_rev))
4482 return ad->horkage;
4483 }
4484 ad++;
4485 }
4486 return 0;
4487 }
4488
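#if 0	/* illustration only -- a hedged sketch, not part of this file */
/*
 * How an entry in ata_device_blacklist[] above is matched: glob_match()
 * is called pattern-first, '*' matches any run of characters and '[...]'
 * a character set, so { "ST3320[68]13AS", "SD1[5-9]", ... } covers models
 * ST3320613AS and ST3320813AS with firmware revisions SD15 through SD19.
 */
static bool example_blacklist_hit(void)
{
	return glob_match("ST3320[68]13AS", "ST3320613AS") &&
	       glob_match("SD1[5-9]", "SD17");
}
#endif
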
4489 static int ata_dma_blacklisted(const struct ata_device *dev)
4490 {
4491 /* We don't support polling DMA.
4492 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
4493 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4494 */
4495 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4496 (dev->flags & ATA_DFLAG_CDB_INTR))
4497 return 1;
4498 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4499 }
4500
4501 /**
4502 * ata_is_40wire - check drive side detection
4503 * @dev: device
4504 *
4505 * Perform drive side detection decoding, allowing for device vendors
4506 * who can't follow the documentation.
4507 */
4508
4509 static int ata_is_40wire(struct ata_device *dev)
4510 {
4511 if (dev->horkage & ATA_HORKAGE_IVB)
4512 return ata_drive_40wire_relaxed(dev->id);
4513 return ata_drive_40wire(dev->id);
4514 }
4515
4516 /**
4517 * cable_is_40wire - 40/80/SATA decider
4518 * @ap: port to consider
4519 *
4520 * This function encapsulates the policy for speed management
4521 * in one place. At the moment we don't cache the result but
4522 * there is a good case for setting ap->cbl to the result when
4523 * we are called with unknown cables (and figuring out if it
4524 * impacts hotplug at all).
4525 *
4526 * Return 1 if the cable appears to be 40 wire.
4527 */
4528
4529 static int cable_is_40wire(struct ata_port *ap)
4530 {
4531 struct ata_link *link;
4532 struct ata_device *dev;
4533
4534 /* If the controller thinks we are 40 wire, we are. */
4535 if (ap->cbl == ATA_CBL_PATA40)
4536 return 1;
4537
4538 /* If the controller thinks we are 80 wire, we are. */
4539 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4540 return 0;
4541
4542 /* If the system is known to be 40 wire short cable (eg
4543 * laptop), then we allow 80 wire modes even if the drive
4544 * isn't sure.
4545 */
4546 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4547 return 0;
4548
4549 /* If the controller doesn't know, we scan.
4550 *
4551 * Note: We look for all 40 wire detects at this point. Any
4552 * 80 wire detect is taken to be 80 wire cable because
4553 * - in many setups only the one drive (slave if present) will
4554 * give a valid detect
4555 * - if you have a non detect capable drive you don't want it
4556 * to colour the choice
4557 */
4558 ata_for_each_link(link, ap, EDGE) {
4559 ata_for_each_dev(dev, link, ENABLED) {
4560 if (!ata_is_40wire(dev))
4561 return 0;
4562 }
4563 }
4564 return 1;
4565 }
4566
4567 /**
4568 * ata_dev_xfermask - Compute supported xfermask of the given device
4569 * @dev: Device to compute xfermask for
4570 *
4571 * Compute supported xfermask of @dev and store it in
4572 * dev->*_mask. This function is responsible for applying all
4573 * known limits including host controller limits, device
4574 * blacklist, etc...
4575 *
4576 * LOCKING:
4577 * None.
4578 */
4579 static void ata_dev_xfermask(struct ata_device *dev)
4580 {
4581 struct ata_link *link = dev->link;
4582 struct ata_port *ap = link->ap;
4583 struct ata_host *host = ap->host;
4584 unsigned long xfer_mask;
4585
4586 /* controller modes available */
4587 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4588 ap->mwdma_mask, ap->udma_mask);
4589
4590 /* drive modes available */
4591 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4592 dev->mwdma_mask, dev->udma_mask);
4593 xfer_mask &= ata_id_xfermask(dev->id);
4594
4595 /*
4596 * CFA Advanced TrueIDE timings are not allowed on a shared
4597 * cable
4598 */
4599 if (ata_dev_pair(dev)) {
4600 /* No PIO5 or PIO6 */
4601 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4602 /* No MWDMA3 or MWDMA4 */
4603 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4604 }
4605
4606 if (ata_dma_blacklisted(dev)) {
4607 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4608 ata_dev_warn(dev,
4609 "device is on DMA blacklist, disabling DMA\n");
4610 }
4611
4612 if ((host->flags & ATA_HOST_SIMPLEX) &&
4613 host->simplex_claimed && host->simplex_claimed != ap) {
4614 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4615 ata_dev_warn(dev,
4616 "simplex DMA is claimed by other device, disabling DMA\n");
4617 }
4618
4619 if (ap->flags & ATA_FLAG_NO_IORDY)
4620 xfer_mask &= ata_pio_mask_no_iordy(dev);
4621
4622 if (ap->ops->mode_filter)
4623 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4624
4625 /* Apply the cable rule here. Don't apply it early because when
4626 * we handle hotplug the cable type can itself change.
4627 * Check this last so that we know if the transfer rate was
4628 * solely limited by the cable.
4629 * Unknown or 80-wire cables reported on the host side are checked
4630 * on the drive side as well. Cases where we know a 40-wire cable
4631 * is safely used for 80-wire modes are not checked here.
4632 */
4633 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4634 /* UDMA/44 or higher would be available */
4635 if (cable_is_40wire(ap)) {
4636 ata_dev_warn(dev,
4637 "limited to UDMA/33 due to 40-wire cable\n");
4638 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4639 }
4640
4641 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4642 &dev->mwdma_mask, &dev->udma_mask);
4643 }
4644
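#if 0	/* illustration only -- a hedged sketch, not part of this file */
/*
 * The cable clamp at the end of ata_dev_xfermask() in isolation: with
 * ATA_SHIFT_UDMA naming the UDMA bit field, mask 0xF8 selects UDMA3
 * (UDMA/44) and above, i.e. the modes a 40-wire cable cannot carry
 * reliably, so clearing those bits leaves only UDMA0-2 (up to UDMA/33).
 */
static unsigned long example_clamp_to_udma33(unsigned long xfer_mask)
{
	return xfer_mask & ~(0xF8 << ATA_SHIFT_UDMA);
}
#endif
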
4645 /**
4646 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4647 * @dev: Device to which command will be sent
4648 *
4649 * Issue SET FEATURES - XFER MODE command to device @dev
4650 * on port @ap.
4651 *
4652 * LOCKING:
4653 * PCI/etc. bus probe sem.
4654 *
4655 * RETURNS:
4656 * 0 on success, AC_ERR_* mask otherwise.
4657 */
4658
4659 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4660 {
4661 struct ata_taskfile tf;
4662 unsigned int err_mask;
4663
4664 /* set up set-features taskfile */
4665 DPRINTK("set features - xfer mode\n");
4666
4667 /* Some controllers and ATAPI devices show flaky interrupt
4668 * behavior after setting xfer mode. Use polling instead.
4669 */
4670 ata_tf_init(dev, &tf);
4671 tf.command = ATA_CMD_SET_FEATURES;
4672 tf.feature = SETFEATURES_XFER;
4673 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4674 tf.protocol = ATA_PROT_NODATA;
4675 /* If we are using IORDY we must send the mode setting command */
4676 if (ata_pio_need_iordy(dev))
4677 tf.nsect = dev->xfer_mode;
4678 /* If the device has IORDY and the controller does not - turn it off */
4679 else if (ata_id_has_iordy(dev->id))
4680 tf.nsect = 0x01;
4681 else /* In the ancient relic department - skip all of this */
4682 return 0;
4683
4684 /* On some disks, this command causes spin-up, so we need a longer timeout */
4685 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4686
4687 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4688 return err_mask;
4689 }
4690
4691 /**
4692 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4693 * @dev: Device to which command will be sent
4694 * @enable: Whether to enable or disable the feature
4695 * @feature: The feature to set, passed in the sector count field
4696 *
4697 * Issue SET FEATURES - SATA FEATURES command to device @dev
4698 * on port @ap with the sector count set to @feature.
4699 *
4700 * LOCKING:
4701 * PCI/etc. bus probe sem.
4702 *
4703 * RETURNS:
4704 * 0 on success, AC_ERR_* mask otherwise.
4705 */
4706 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4707 {
4708 struct ata_taskfile tf;
4709 unsigned int err_mask;
4710 unsigned long timeout = 0;
4711
4712 /* set up set-features taskfile */
4713 DPRINTK("set features - SATA features\n");
4714
4715 ata_tf_init(dev, &tf);
4716 tf.command = ATA_CMD_SET_FEATURES;
4717 tf.feature = enable;
4718 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4719 tf.protocol = ATA_PROT_NODATA;
4720 tf.nsect = feature;
4721
4722 if (enable == SETFEATURES_SPINUP)
4723 timeout = ata_probe_timeout ?
4724 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4725 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4726
4727 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4728 return err_mask;
4729 }
4730 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4731
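#if 0	/* illustration only -- a hedged sketch, not part of this file */
/*
 * A typical caller of ata_dev_set_feature(): enabling FPDMA AutoActivate,
 * assuming the SETFEATURES_SATA_ENABLE and SATA_FPDMA_AA definitions from
 * <linux/ata.h>, and treating a non-zero AC_ERR_* mask as failure.
 */
static int example_enable_fpdma_aa(struct ata_device *dev)
{
	unsigned int err_mask;

	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
				       SATA_FPDMA_AA);
	if (err_mask) {
		ata_dev_warn(dev, "failed to enable AA (err_mask=0x%x)\n",
			     err_mask);
		return -EIO;
	}
	return 0;
}
#endif
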
4732 /**
4733 * ata_dev_init_params - Issue INIT DEV PARAMS command
4734 * @dev: Device to which command will be sent
4735 * @heads: Number of heads (taskfile parameter)
4736 * @sectors: Number of sectors (taskfile parameter)
4737 *
4738 * LOCKING:
4739 * Kernel thread context (may sleep)
4740 *
4741 * RETURNS:
4742 * 0 on success, AC_ERR_* mask otherwise.
4743 */
4744 static unsigned int ata_dev_init_params(struct ata_device *dev,
4745 u16 heads, u16 sectors)
4746 {
4747 struct ata_taskfile tf;
4748 unsigned int err_mask;
4749
4750 /* Number of sectors per track 1-255. Number of heads 1-16 */
4751 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4752 return AC_ERR_INVALID;
4753
4754 /* set up init dev params taskfile */
4755 DPRINTK("init dev params \n");
4756
4757 ata_tf_init(dev, &tf);
4758 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4759 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4760 tf.protocol = ATA_PROT_NODATA;
4761 tf.nsect = sectors;
4762 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4763
4764 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4765 /* A clean abort indicates an original or just-out-of-spec drive,
4766 and we should continue as we issue the setup based on the
4767 drive-reported working geometry */
4768 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4769 err_mask = 0;
4770
4771 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4772 return err_mask;
4773 }
4774
4775 /**
4776 * ata_sg_clean - Unmap DMA memory associated with command
4777 * @qc: Command containing DMA memory to be released
4778 *
4779 * Unmap all mapped DMA memory associated with this command.
4780 *
4781 * LOCKING:
4782 * spin_lock_irqsave(host lock)
4783 */
4784 void ata_sg_clean(struct ata_queued_cmd *qc)
4785 {
4786 struct ata_port *ap = qc->ap;
4787 struct scatterlist *sg = qc->sg;
4788 int dir = qc->dma_dir;
4789
4790 WARN_ON_ONCE(sg == NULL);
4791
4792 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4793
4794 if (qc->n_elem)
4795 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4796
4797 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4798 qc->sg = NULL;
4799 }
4800
4801 /**
4802 * atapi_check_dma - Check whether ATAPI DMA can be supported
4803 * @qc: Metadata associated with taskfile to check
4804 *
4805 * Allow low-level driver to filter ATA PACKET commands, returning
4806 * a status indicating whether or not it is OK to use DMA for the
4807 * supplied PACKET command.
4808 *
4809 * LOCKING:
4810 * spin_lock_irqsave(host lock)
4811 *
4812 * RETURNS: 0 when ATAPI DMA can be used
4813 * nonzero otherwise
4814 */
4815 int atapi_check_dma(struct ata_queued_cmd *qc)
4816 {
4817 struct ata_port *ap = qc->ap;
4818
4819 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4820 * few ATAPI devices choke on such DMA requests.
4821 */
4822 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4823 unlikely(qc->nbytes & 15))
4824 return 1;
4825
4826 if (ap->ops->check_atapi_dma)
4827 return ap->ops->check_atapi_dma(qc);
4828
4829 return 0;
4830 }
4831
4832 /**
4833 * ata_std_qc_defer - Check whether a qc needs to be deferred
4834 * @qc: ATA command in question
4835 *
4836 * Non-NCQ commands cannot run with any other command, NCQ or
4837 * not. As the upper layer only knows the queue depth, we are
4838 * responsible for maintaining exclusion. This function checks
4839 * whether a new command @qc can be issued.
4840 *
4841 * LOCKING:
4842 * spin_lock_irqsave(host lock)
4843 *
4844 * RETURNS:
4845 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4846 */
4847 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4848 {
4849 struct ata_link *link = qc->dev->link;
4850
4851 if (ata_is_ncq(qc->tf.protocol)) {
4852 if (!ata_tag_valid(link->active_tag))
4853 return 0;
4854 } else {
4855 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4856 return 0;
4857 }
4858
4859 return ATA_DEFER_LINK;
4860 }
4861
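#if 0	/* illustration only -- hypothetical driver code, not part of this file */
/*
 * How a driver with an extra hardware restriction might layer its own
 * deferral rule on top of ata_std_qc_defer().  The "only one ATAPI command
 * in flight" condition and example_atapi_busy() are invented for the
 * example; only the structure is the point.
 */
static int example_qc_defer(struct ata_queued_cmd *qc)
{
	if (ata_is_atapi(qc->tf.protocol) && example_atapi_busy(qc->ap))
		return ATA_DEFER_PORT;

	return ata_std_qc_defer(qc);
}
#endif
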
4862 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4863
4864 /**
4865 * ata_sg_init - Associate command with scatter-gather table.
4866 * @qc: Command to be associated
4867 * @sg: Scatter-gather table.
4868 * @n_elem: Number of elements in s/g table.
4869 *
4870 * Initialize the data-related elements of queued_cmd @qc
4871 * to point to a scatter-gather table @sg, containing @n_elem
4872 * elements.
4873 *
4874 * LOCKING:
4875 * spin_lock_irqsave(host lock)
4876 */
4877 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4878 unsigned int n_elem)
4879 {
4880 qc->sg = sg;
4881 qc->n_elem = n_elem;
4882 qc->cursg = qc->sg;
4883 }
4884
4885 /**
4886 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4887 * @qc: Command with scatter-gather table to be mapped.
4888 *
4889 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4890 *
4891 * LOCKING:
4892 * spin_lock_irqsave(host lock)
4893 *
4894 * RETURNS:
4895 * Zero on success, negative on error.
4896 *
4897 */
4898 static int ata_sg_setup(struct ata_queued_cmd *qc)
4899 {
4900 struct ata_port *ap = qc->ap;
4901 unsigned int n_elem;
4902
4903 VPRINTK("ENTER, ata%u\n", ap->print_id);
4904
4905 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4906 if (n_elem < 1)
4907 return -1;
4908
4909 DPRINTK("%d sg elements mapped\n", n_elem);
4910 qc->orig_n_elem = qc->n_elem;
4911 qc->n_elem = n_elem;
4912 qc->flags |= ATA_QCFLAG_DMAMAP;
4913
4914 return 0;
4915 }
4916
4917 /**
4918 * swap_buf_le16 - swap halves of 16-bit words in place
4919 * @buf: Buffer to swap
4920 * @buf_words: Number of 16-bit words in buffer.
4921 *
4922 * Swap halves of 16-bit words if needed to convert from
4923 * little-endian byte order to native cpu byte order, or
4924 * vice-versa.
4925 *
4926 * LOCKING:
4927 * Inherited from caller.
4928 */
4929 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4930 {
4931 #ifdef __BIG_ENDIAN
4932 unsigned int i;
4933
4934 for (i = 0; i < buf_words; i++)
4935 buf[i] = le16_to_cpu(buf[i]);
4936 #endif /* __BIG_ENDIAN */
4937 }
4938
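#if 0	/* illustration only -- a hedged sketch, not part of this file */
/*
 * Typical use of swap_buf_le16(): IDENTIFY DEVICE data arrives as 256
 * little-endian 16-bit words (ATA_ID_WORDS), so on big-endian hosts the
 * buffer must be swapped before the ata_id_*() helpers can parse it.
 * The helper is a no-op on little-endian builds.
 */
static void example_fixup_identify(u16 id[ATA_ID_WORDS])
{
	swap_buf_le16(id, ATA_ID_WORDS);
}
#endif
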
4939 /**
4940 * ata_qc_new_init - Request an available ATA command, and initialize it
4941 * @dev: Device from which we request an available command structure
4942 * @tag: tag
4943 *
4944 * LOCKING:
4945 * None.
4946 */
4947
4948 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4949 {
4950 struct ata_port *ap = dev->link->ap;
4951 struct ata_queued_cmd *qc;
4952
4953 /* no command while frozen */
4954 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4955 return NULL;
4956
4957 /* libsas case */
4958 if (ap->flags & ATA_FLAG_SAS_HOST) {
4959 tag = ata_sas_allocate_tag(ap);
4960 if (tag < 0)
4961 return NULL;
4962 }
4963
4964 qc = __ata_qc_from_tag(ap, tag);
4965 qc->tag = tag;
4966 qc->scsicmd = NULL;
4967 qc->ap = ap;
4968 qc->dev = dev;
4969
4970 ata_qc_reinit(qc);
4971
4972 return qc;
4973 }
4974
4975 /**
4976 * ata_qc_free - free unused ata_queued_cmd
4977 * @qc: Command to complete
4978 *
4979 * Designed to free an unused ata_queued_cmd object
4980 * in case something prevents it from being used.
4981 *
4982 * LOCKING:
4983 * spin_lock_irqsave(host lock)
4984 */
4985 void ata_qc_free(struct ata_queued_cmd *qc)
4986 {
4987 struct ata_port *ap;
4988 unsigned int tag;
4989
4990 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4991 ap = qc->ap;
4992
4993 qc->flags = 0;
4994 tag = qc->tag;
4995 if (likely(ata_tag_valid(tag))) {
4996 qc->tag = ATA_TAG_POISON;
4997 if (ap->flags & ATA_FLAG_SAS_HOST)
4998 ata_sas_free_tag(tag, ap);
4999 }
5000 }
5001
5002 void __ata_qc_complete(struct ata_queued_cmd *qc)
5003 {
5004 struct ata_port *ap;
5005 struct ata_link *link;
5006
5007 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5008 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
5009 ap = qc->ap;
5010 link = qc->dev->link;
5011
5012 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5013 ata_sg_clean(qc);
5014
5015 /* command should be marked inactive atomically with qc completion */
5016 if (ata_is_ncq(qc->tf.protocol)) {
5017 link->sactive &= ~(1 << qc->tag);
5018 if (!link->sactive)
5019 ap->nr_active_links--;
5020 } else {
5021 link->active_tag = ATA_TAG_POISON;
5022 ap->nr_active_links--;
5023 }
5024
5025 /* clear exclusive status */
5026 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5027 ap->excl_link == link))
5028 ap->excl_link = NULL;
5029
5030 /* atapi: mark qc as inactive to prevent the interrupt handler
5031 * from completing the command twice later, before the error handler
5032 * is called. (when rc != 0 and atapi request sense is needed)
5033 */
5034 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5035 ap->qc_active &= ~(1 << qc->tag);
5036
5037 /* call completion callback */
5038 qc->complete_fn(qc);
5039 }
5040
5041 static void fill_result_tf(struct ata_queued_cmd *qc)
5042 {
5043 struct ata_port *ap = qc->ap;
5044
5045 qc->result_tf.flags = qc->tf.flags;
5046 ap->ops->qc_fill_rtf(qc);
5047 }
5048
5049 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5050 {
5051 struct ata_device *dev = qc->dev;
5052
5053 if (!ata_is_data(qc->tf.protocol))
5054 return;
5055
5056 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5057 return;
5058
5059 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5060 }
5061
5062 /**
5063 * ata_qc_complete - Complete an active ATA command
5064 * @qc: Command to complete
5065 *
5066 * Indicate to the mid and upper layers that an ATA command has
5067 * completed, with either an ok or not-ok status.
5068 *
5069 * Refrain from calling this function multiple times when
5070 * successfully completing multiple NCQ commands.
5071 * ata_qc_complete_multiple() should be used instead, which will
5072 * properly update IRQ expect state.
5073 *
5074 * LOCKING:
5075 * spin_lock_irqsave(host lock)
5076 */
5077 void ata_qc_complete(struct ata_queued_cmd *qc)
5078 {
5079 struct ata_port *ap = qc->ap;
5080
5081 /* XXX: New EH and old EH use different mechanisms to
5082 * synchronize EH with regular execution path.
5083 *
5084 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5085 * Normal execution path is responsible for not accessing a
5086 * failed qc. libata core enforces the rule by returning NULL
5087 * from ata_qc_from_tag() for failed qcs.
5088 *
5089 * Old EH depends on ata_qc_complete() nullifying completion
5090 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5091 * not synchronize with interrupt handler. Only PIO task is
5092 * taken care of.
5093 */
5094 if (ap->ops->error_handler) {
5095 struct ata_device *dev = qc->dev;
5096 struct ata_eh_info *ehi = &dev->link->eh_info;
5097
5098 if (unlikely(qc->err_mask))
5099 qc->flags |= ATA_QCFLAG_FAILED;
5100
5101 /*
5102 * Finish internal commands without any further processing
5103 * and always with the result TF filled.
5104 */
5105 if (unlikely(ata_tag_internal(qc->tag))) {
5106 fill_result_tf(qc);
5107 trace_ata_qc_complete_internal(qc);
5108 __ata_qc_complete(qc);
5109 return;
5110 }
5111
5112 /*
5113 * Non-internal qc has failed. Fill the result TF and
5114 * summon EH.
5115 */
5116 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5117 fill_result_tf(qc);
5118 trace_ata_qc_complete_failed(qc);
5119 ata_qc_schedule_eh(qc);
5120 return;
5121 }
5122
5123 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5124
5125 /* read result TF if requested */
5126 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5127 fill_result_tf(qc);
5128
5129 trace_ata_qc_complete_done(qc);
5130 /* Some commands need post-processing after successful
5131 * completion.
5132 */
5133 switch (qc->tf.command) {
5134 case ATA_CMD_SET_FEATURES:
5135 if (qc->tf.feature != SETFEATURES_WC_ON &&
5136 qc->tf.feature != SETFEATURES_WC_OFF &&
5137 qc->tf.feature != SETFEATURES_RA_ON &&
5138 qc->tf.feature != SETFEATURES_RA_OFF)
5139 break;
5140 /* fall through */
5141 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5142 case ATA_CMD_SET_MULTI: /* multi_count changed */
5143 /* revalidate device */
5144 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5145 ata_port_schedule_eh(ap);
5146 break;
5147
5148 case ATA_CMD_SLEEP:
5149 dev->flags |= ATA_DFLAG_SLEEPING;
5150 break;
5151 }
5152
5153 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5154 ata_verify_xfer(qc);
5155
5156 __ata_qc_complete(qc);
5157 } else {
5158 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5159 return;
5160
5161 /* read result TF if failed or requested */
5162 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5163 fill_result_tf(qc);
5164
5165 __ata_qc_complete(qc);
5166 }
5167 }
5168
5169 /**
5170 * ata_qc_complete_multiple - Complete multiple qcs successfully
5171 * @ap: port in question
5172 * @qc_active: new qc_active mask
5173 *
5174 * Complete in-flight commands. This function is meant to be
5175 * called from a low-level driver's interrupt routine to complete
5176 * requests normally. ap->qc_active and @qc_active are compared
5177 * and commands are completed accordingly.
5178 *
5179 * Always use this function when completing multiple NCQ commands
5180 * from IRQ handlers instead of calling ata_qc_complete()
5181 * multiple times to keep IRQ expect status properly in sync.
5182 *
5183 * LOCKING:
5184 * spin_lock_irqsave(host lock)
5185 *
5186 * RETURNS:
5187 * Number of completed commands on success, -errno otherwise.
5188 */
5189 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5190 {
5191 int nr_done = 0;
5192 u32 done_mask;
5193
5194 done_mask = ap->qc_active ^ qc_active;
5195
5196 if (unlikely(done_mask & qc_active)) {
5197 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5198 ap->qc_active, qc_active);
5199 return -EINVAL;
5200 }
5201
5202 while (done_mask) {
5203 struct ata_queued_cmd *qc;
5204 unsigned int tag = __ffs(done_mask);
5205
5206 qc = ata_qc_from_tag(ap, tag);
5207 if (qc) {
5208 ata_qc_complete(qc);
5209 nr_done++;
5210 }
5211 done_mask &= ~(1 << tag);
5212 }
5213
5214 return nr_done;
5215 }
5216
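#if 0	/* illustration only -- hypothetical driver code, not part of this file */
/*
 * Sketch of the intended calling pattern from an LLD interrupt handler:
 * read the controller's view of which tags are still busy (the helper name
 * here is made up) and hand that still-active set to
 * ata_qc_complete_multiple(), which completes every command that has
 * dropped out of ap->qc_active in one go.
 */
static void example_ncq_irq(struct ata_port *ap)
{
	u32 qc_active = example_read_active_tags(ap);	/* hypothetical */

	ata_qc_complete_multiple(ap, qc_active);
}
#endif
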
5217 /**
5218 * ata_qc_issue - issue taskfile to device
5219 * @qc: command to issue to device
5220 *
5221 * Prepare an ATA command for submission to the device.
5222 * This includes mapping the data into a DMA-able
5223 * area, filling in the S/G table, and finally
5224 * writing the taskfile to hardware, starting the command.
5225 *
5226 * LOCKING:
5227 * spin_lock_irqsave(host lock)
5228 */
5229 void ata_qc_issue(struct ata_queued_cmd *qc)
5230 {
5231 struct ata_port *ap = qc->ap;
5232 struct ata_link *link = qc->dev->link;
5233 u8 prot = qc->tf.protocol;
5234
5235 /* Make sure only one non-NCQ command is outstanding. The
5236 * check is skipped for old EH because it reuses the active qc to
5237 * request ATAPI sense.
5238 */
5239 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5240
5241 if (ata_is_ncq(prot)) {
5242 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5243
5244 if (!link->sactive)
5245 ap->nr_active_links++;
5246 link->sactive |= 1 << qc->tag;
5247 } else {
5248 WARN_ON_ONCE(link->sactive);
5249
5250 ap->nr_active_links++;
5251 link->active_tag = qc->tag;
5252 }
5253
5254 qc->flags |= ATA_QCFLAG_ACTIVE;
5255 ap->qc_active |= 1 << qc->tag;
5256
5257 /*
5258 * We guarantee to LLDs that they will have at least one
5259 * non-zero sg if the command is a data command.
5260 */
5261 if (WARN_ON_ONCE(ata_is_data(prot) &&
5262 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5263 goto sys_err;
5264
5265 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5266 (ap->flags & ATA_FLAG_PIO_DMA)))
5267 if (ata_sg_setup(qc))
5268 goto sys_err;
5269
5270 /* if device is sleeping, schedule reset and abort the link */
5271 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5272 link->eh_info.action |= ATA_EH_RESET;
5273 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5274 ata_link_abort(link);
5275 return;
5276 }
5277
5278 ap->ops->qc_prep(qc);
5279 trace_ata_qc_issue(qc);
5280 qc->err_mask |= ap->ops->qc_issue(qc);
5281 if (unlikely(qc->err_mask))
5282 goto err;
5283 return;
5284
5285 sys_err:
5286 qc->err_mask |= AC_ERR_SYSTEM;
5287 err:
5288 ata_qc_complete(qc);
5289 }
5290
5291 /**
5292 * sata_scr_valid - test whether SCRs are accessible
5293 * @link: ATA link to test SCR accessibility for
5294 *
5295 * Test whether SCRs are accessible for @link.
5296 *
5297 * LOCKING:
5298 * None.
5299 *
5300 * RETURNS:
5301 * 1 if SCRs are accessible, 0 otherwise.
5302 */
5303 int sata_scr_valid(struct ata_link *link)
5304 {
5305 struct ata_port *ap = link->ap;
5306
5307 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5308 }
5309
5310 /**
5311 * sata_scr_read - read SCR register of the specified port
5312 * @link: ATA link to read SCR for
5313 * @reg: SCR to read
5314 * @val: Place to store read value
5315 *
5316 * Read SCR register @reg of @link into *@val. This function is
5317 * guaranteed to succeed if @link is ap->link, the cable type of
5318 * the port is SATA and the port implements ->scr_read.
5319 *
5320 * LOCKING:
5321 * None if @link is ap->link. Kernel thread context otherwise.
5322 *
5323 * RETURNS:
5324 * 0 on success, negative errno on failure.
5325 */
5326 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5327 {
5328 if (ata_is_host_link(link)) {
5329 if (sata_scr_valid(link))
5330 return link->ap->ops->scr_read(link, reg, val);
5331 return -EOPNOTSUPP;
5332 }
5333
5334 return sata_pmp_scr_read(link, reg, val);
5335 }
5336
5337 /**
5338 * sata_scr_write - write SCR register of the specified port
5339 * @link: ATA link to write SCR for
5340 * @reg: SCR to write
5341 * @val: value to write
5342 *
5343 * Write @val to SCR register @reg of @link. This function is
5344 * guaranteed to succeed if @link is ap->link, the cable type of
5345 * the port is SATA and the port implements ->scr_write.
5346 *
5347 * LOCKING:
5348 * None if @link is ap->link. Kernel thread context otherwise.
5349 *
5350 * RETURNS:
5351 * 0 on success, negative errno on failure.
5352 */
5353 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5354 {
5355 if (ata_is_host_link(link)) {
5356 if (sata_scr_valid(link))
5357 return link->ap->ops->scr_write(link, reg, val);
5358 return -EOPNOTSUPP;
5359 }
5360
5361 return sata_pmp_scr_write(link, reg, val);
5362 }
5363
5364 /**
5365 * sata_scr_write_flush - write SCR register of the specified port and flush
5366 * @link: ATA link to write SCR for
5367 * @reg: SCR to write
5368 * @val: value to write
5369 *
5370 * This function is identical to sata_scr_write() except that this
5371 * function performs flush after writing to the register.
5372 *
5373 * LOCKING:
5374 * None if @link is ap->link. Kernel thread context otherwise.
5375 *
5376 * RETURNS:
5377 * 0 on success, negative errno on failure.
5378 */
5379 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5380 {
5381 if (ata_is_host_link(link)) {
5382 int rc;
5383
5384 if (sata_scr_valid(link)) {
5385 rc = link->ap->ops->scr_write(link, reg, val);
5386 if (rc == 0)
5387 rc = link->ap->ops->scr_read(link, reg, &val);
5388 return rc;
5389 }
5390 return -EOPNOTSUPP;
5391 }
5392
5393 return sata_pmp_scr_write(link, reg, val);
5394 }
5395
5396 /**
5397 * ata_phys_link_online - test whether the given link is online
5398 * @link: ATA link to test
5399 *
5400 * Test whether @link is online. Note that this function returns
5401 * 0 if the online status of @link cannot be obtained, so
5402 * ata_phys_link_online(link) != !ata_phys_link_offline(link).
5403 *
5404 * LOCKING:
5405 * None.
5406 *
5407 * RETURNS:
5408 * True if the port online status is available and online.
5409 */
5410 bool ata_phys_link_online(struct ata_link *link)
5411 {
5412 u32 sstatus;
5413
5414 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5415 ata_sstatus_online(sstatus))
5416 return true;
5417 return false;
5418 }
5419
5420 /**
5421 * ata_phys_link_offline - test whether the given link is offline
5422 * @link: ATA link to test
5423 *
5424 * Test whether @link is offline. Note that this function
5425 * returns 0 if the offline status of @link cannot be obtained, so
5426 * ata_phys_link_online(link) != !ata_phys_link_offline(link).
5427 *
5428 * LOCKING:
5429 * None.
5430 *
5431 * RETURNS:
5432 * True if the port offline status is available and offline.
5433 */
5434 bool ata_phys_link_offline(struct ata_link *link)
5435 {
5436 u32 sstatus;
5437
5438 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5439 !ata_sstatus_online(sstatus))
5440 return true;
5441 return false;
5442 }
5443
5444 /**
5445 * ata_link_online - test whether the given link is online
5446 * @link: ATA link to test
5447 *
5448 * Test whether @link is online. This is identical to
5449 * ata_phys_link_online() when there's no slave link. When
5450 * there's a slave link, this function should only be called on
5451 * the master link and will return true if any of M/S links is
5452 * online.
5453 *
5454 * LOCKING:
5455 * None.
5456 *
5457 * RETURNS:
5458 * True if the port online status is available and online.
5459 */
5460 bool ata_link_online(struct ata_link *link)
5461 {
5462 struct ata_link *slave = link->ap->slave_link;
5463
5464 WARN_ON(link == slave); /* shouldn't be called on slave link */
5465
5466 return ata_phys_link_online(link) ||
5467 (slave && ata_phys_link_online(slave));
5468 }
5469
5470 /**
5471 * ata_link_offline - test whether the given link is offline
5472 * @link: ATA link to test
5473 *
5474 * Test whether @link is offline. This is identical to
5475 * ata_phys_link_offline() when there's no slave link. When
5476 * there's a slave link, this function should only be called on
5477 * the master link and will return true if both M/S links are
5478 * offline.
5479 *
5480 * LOCKING:
5481 * None.
5482 *
5483 * RETURNS:
5484 * True if the port offline status is available and offline.
5485 */
5486 bool ata_link_offline(struct ata_link *link)
5487 {
5488 struct ata_link *slave = link->ap->slave_link;
5489
5490 WARN_ON(link == slave); /* shouldn't be called on slave link */
5491
5492 return ata_phys_link_offline(link) &&
5493 (!slave || ata_phys_link_offline(slave));
5494 }
5495
5496 #ifdef CONFIG_PM
5497 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5498 unsigned int action, unsigned int ehi_flags,
5499 bool async)
5500 {
5501 struct ata_link *link;
5502 unsigned long flags;
5503
5504 /* A previous resume operation might still be in
5505 * progress. Wait for PM_PENDING to clear.
5506 */
5507 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5508 ata_port_wait_eh(ap);
5509 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5510 }
5511
5512 /* request PM ops to EH */
5513 spin_lock_irqsave(ap->lock, flags);
5514
5515 ap->pm_mesg = mesg;
5516 ap->pflags |= ATA_PFLAG_PM_PENDING;
5517 ata_for_each_link(link, ap, HOST_FIRST) {
5518 link->eh_info.action |= action;
5519 link->eh_info.flags |= ehi_flags;
5520 }
5521
5522 ata_port_schedule_eh(ap);
5523
5524 spin_unlock_irqrestore(ap->lock, flags);
5525
5526 if (!async) {
5527 ata_port_wait_eh(ap);
5528 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5529 }
5530 }
5531
5532 /*
5533 * On some hardware, a device fails to respond after being spun down for
5534 * suspend. As the device won't be used before being resumed, we don't need to
5535 * touch it. Ask EH to skip the usual stuff and proceed directly to suspend.
5536 *
5537 * http://thread.gmane.org/gmane.linux.ide/46764
5538 */
5539 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5540 | ATA_EHI_NO_AUTOPSY
5541 | ATA_EHI_NO_RECOVERY;
5542
5543 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5544 {
5545 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5546 }
5547
5548 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5549 {
5550 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5551 }
5552
5553 static int ata_port_pm_suspend(struct device *dev)
5554 {
5555 struct ata_port *ap = to_ata_port(dev);
5556
5557 if (pm_runtime_suspended(dev))
5558 return 0;
5559
5560 ata_port_suspend(ap, PMSG_SUSPEND);
5561 return 0;
5562 }
5563
5564 static int ata_port_pm_freeze(struct device *dev)
5565 {
5566 struct ata_port *ap = to_ata_port(dev);
5567
5568 if (pm_runtime_suspended(dev))
5569 return 0;
5570
5571 ata_port_suspend(ap, PMSG_FREEZE);
5572 return 0;
5573 }
5574
5575 static int ata_port_pm_poweroff(struct device *dev)
5576 {
5577 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5578 return 0;
5579 }
5580
5581 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5582 | ATA_EHI_QUIET;
5583
5584 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5585 {
5586 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5587 }
5588
5589 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5590 {
5591 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5592 }
5593
5594 static int ata_port_pm_resume(struct device *dev)
5595 {
5596 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5597 pm_runtime_disable(dev);
5598 pm_runtime_set_active(dev);
5599 pm_runtime_enable(dev);
5600 return 0;
5601 }
5602
5603 /*
5604 * For ODDs, the upper layer will poll for media change every few seconds,
5605 * which will make the port enter and leave the suspend state every few
5606 * seconds. As each suspend causes a hard/soft reset, the gain from runtime
5607 * suspend is very small and the ODD may malfunction after being constantly
5608 * reset. So the idle callback here will not proceed to suspend if a
5609 * non-ZPODD-capable ODD is attached to the port.
5610 */
5611 static int ata_port_runtime_idle(struct device *dev)
5612 {
5613 struct ata_port *ap = to_ata_port(dev);
5614 struct ata_link *link;
5615 struct ata_device *adev;
5616
5617 ata_for_each_link(link, ap, HOST_FIRST) {
5618 ata_for_each_dev(adev, link, ENABLED)
5619 if (adev->class == ATA_DEV_ATAPI &&
5620 !zpodd_dev_enabled(adev))
5621 return -EBUSY;
5622 }
5623
5624 return 0;
5625 }
5626
5627 static int ata_port_runtime_suspend(struct device *dev)
5628 {
5629 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5630 return 0;
5631 }
5632
5633 static int ata_port_runtime_resume(struct device *dev)
5634 {
5635 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5636 return 0;
5637 }
5638
5639 static const struct dev_pm_ops ata_port_pm_ops = {
5640 .suspend = ata_port_pm_suspend,
5641 .resume = ata_port_pm_resume,
5642 .freeze = ata_port_pm_freeze,
5643 .thaw = ata_port_pm_resume,
5644 .poweroff = ata_port_pm_poweroff,
5645 .restore = ata_port_pm_resume,
5646
5647 .runtime_suspend = ata_port_runtime_suspend,
5648 .runtime_resume = ata_port_runtime_resume,
5649 .runtime_idle = ata_port_runtime_idle,
5650 };
5651
5652 /* sas ports don't participate in pm runtime management of ata_ports,
5653 * and need to resume ata devices at the domain level, not the per-port
5654 * level. sas suspend/resume is async to allow parallel port recovery
5655 * since sas has multiple ata_port instances per Scsi_Host.
5656 */
5657 void ata_sas_port_suspend(struct ata_port *ap)
5658 {
5659 ata_port_suspend_async(ap, PMSG_SUSPEND);
5660 }
5661 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5662
5663 void ata_sas_port_resume(struct ata_port *ap)
5664 {
5665 ata_port_resume_async(ap, PMSG_RESUME);
5666 }
5667 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5668
5669 /**
5670 * ata_host_suspend - suspend host
5671 * @host: host to suspend
5672 * @mesg: PM message
5673 *
5674 * Suspend @host. Actual operation is performed by port suspend.
5675 */
5676 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5677 {
5678 host->dev->power.power_state = mesg;
5679 return 0;
5680 }
5681
5682 /**
5683 * ata_host_resume - resume host
5684 * @host: host to resume
5685 *
5686 * Resume @host. Actual operation is performed by port resume.
5687 */
5688 void ata_host_resume(struct ata_host *host)
5689 {
5690 host->dev->power.power_state = PMSG_ON;
5691 }
5692 #endif
5693
5694 struct device_type ata_port_type = {
5695 .name = "ata_port",
5696 #ifdef CONFIG_PM
5697 .pm = &ata_port_pm_ops,
5698 #endif
5699 };
5700
5701 /**
5702 * ata_dev_init - Initialize an ata_device structure
5703 * @dev: Device structure to initialize
5704 *
5705 * Initialize @dev in preparation for probing.
5706 *
5707 * LOCKING:
5708 * Inherited from caller.
5709 */
5710 void ata_dev_init(struct ata_device *dev)
5711 {
5712 struct ata_link *link = ata_dev_phys_link(dev);
5713 struct ata_port *ap = link->ap;
5714 unsigned long flags;
5715
5716 /* SATA spd limit is bound to the attached device, reset together */
5717 link->sata_spd_limit = link->hw_sata_spd_limit;
5718 link->sata_spd = 0;
5719
5720 /* High bits of dev->flags are used to record warm plug
5721 * requests which occur asynchronously. Synchronize using
5722 * host lock.
5723 */
5724 spin_lock_irqsave(ap->lock, flags);
5725 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5726 dev->horkage = 0;
5727 spin_unlock_irqrestore(ap->lock, flags);
5728
5729 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5730 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5731 dev->pio_mask = UINT_MAX;
5732 dev->mwdma_mask = UINT_MAX;
5733 dev->udma_mask = UINT_MAX;
5734 }
5735
5736 /**
5737 * ata_link_init - Initialize an ata_link structure
5738 * @ap: ATA port link is attached to
5739 * @link: Link structure to initialize
5740 * @pmp: Port multiplier port number
5741 *
5742 * Initialize @link.
5743 *
5744 * LOCKING:
5745 * Kernel thread context (may sleep)
5746 */
5747 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5748 {
5749 int i;
5750
5751 /* clear everything except for devices */
5752 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5753 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5754
5755 link->ap = ap;
5756 link->pmp = pmp;
5757 link->active_tag = ATA_TAG_POISON;
5758 link->hw_sata_spd_limit = UINT_MAX;
5759
5760 /* can't use iterator, ap isn't initialized yet */
5761 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5762 struct ata_device *dev = &link->device[i];
5763
5764 dev->link = link;
5765 dev->devno = dev - link->device;
5766 #ifdef CONFIG_ATA_ACPI
5767 dev->gtf_filter = ata_acpi_gtf_filter;
5768 #endif
5769 ata_dev_init(dev);
5770 }
5771 }
5772
5773 /**
5774 * sata_link_init_spd - Initialize link->sata_spd_limit
5775 * @link: Link to configure sata_spd_limit for
5776 *
5777 * Initialize @link->[hw_]sata_spd_limit to the currently
5778 * configured value.
5779 *
5780 * LOCKING:
5781 * Kernel thread context (may sleep).
5782 *
5783 * RETURNS:
5784 * 0 on success, -errno on failure.
5785 */
5786 int sata_link_init_spd(struct ata_link *link)
5787 {
5788 u8 spd;
5789 int rc;
5790
5791 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5792 if (rc)
5793 return rc;
5794
5795 spd = (link->saved_scontrol >> 4) & 0xf;
5796 if (spd)
5797 link->hw_sata_spd_limit &= (1 << spd) - 1;
5798
5799 ata_force_link_limits(link);
5800
5801 link->sata_spd_limit = link->hw_sata_spd_limit;
5802
5803 return 0;
5804 }
5805
5806 /**
5807 * ata_port_alloc - allocate and initialize basic ATA port resources
5808 * @host: ATA host this allocated port belongs to
5809 *
5810 * Allocate and initialize basic ATA port resources.
5811 *
5812 * RETURNS:
5813 * Allocated ATA port on success, NULL on failure.
5814 *
5815 * LOCKING:
5816 * Inherited from calling layer (may sleep).
5817 */
5818 struct ata_port *ata_port_alloc(struct ata_host *host)
5819 {
5820 struct ata_port *ap;
5821
5822 DPRINTK("ENTER\n");
5823
5824 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5825 if (!ap)
5826 return NULL;
5827
5828 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5829 ap->lock = &host->lock;
5830 ap->print_id = -1;
5831 ap->local_port_no = -1;
5832 ap->host = host;
5833 ap->dev = host->dev;
5834
5835 #if defined(ATA_VERBOSE_DEBUG)
5836 /* turn on all debugging levels */
5837 ap->msg_enable = 0x00FF;
5838 #elif defined(ATA_DEBUG)
5839 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5840 #else
5841 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5842 #endif
5843
5844 mutex_init(&ap->scsi_scan_mutex);
5845 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5846 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5847 INIT_LIST_HEAD(&ap->eh_done_q);
5848 init_waitqueue_head(&ap->eh_wait_q);
5849 init_completion(&ap->park_req_pending);
5850 init_timer_deferrable(&ap->fastdrain_timer);
5851 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5852 ap->fastdrain_timer.data = (unsigned long)ap;
5853
5854 ap->cbl = ATA_CBL_NONE;
5855
5856 ata_link_init(ap, &ap->link, 0);
5857
5858 #ifdef ATA_IRQ_TRAP
5859 ap->stats.unhandled_irq = 1;
5860 ap->stats.idle_irq = 1;
5861 #endif
5862 ata_sff_port_init(ap);
5863
5864 return ap;
5865 }
5866
5867 static void ata_host_release(struct device *gendev, void *res)
5868 {
5869 struct ata_host *host = dev_get_drvdata(gendev);
5870 int i;
5871
5872 for (i = 0; i < host->n_ports; i++) {
5873 struct ata_port *ap = host->ports[i];
5874
5875 if (!ap)
5876 continue;
5877
5878 if (ap->scsi_host)
5879 scsi_host_put(ap->scsi_host);
5880
5881 kfree(ap->pmp_link);
5882 kfree(ap->slave_link);
5883 kfree(ap);
5884 host->ports[i] = NULL;
5885 }
5886
5887 dev_set_drvdata(gendev, NULL);
5888 }
5889
5890 /**
5891 * ata_host_alloc - allocate and init basic ATA host resources
5892 * @dev: generic device this host is associated with
5893 * @max_ports: maximum number of ATA ports associated with this host
5894 *
5895 * Allocate and initialize basic ATA host resources. An LLD calls
5896 * this function to allocate a host, initializes it fully, and
5897 * attaches it using ata_host_register().
5898 *
5899 * @max_ports ports are allocated and host->n_ports is
5900 * initialized to @max_ports. The caller is allowed to decrease
5901 * host->n_ports before calling ata_host_register(). The unused
5902 * ports will be automatically freed on registration.
5903 *
5904 * RETURNS:
5905 * Allocated ATA host on success, NULL on failure.
5906 *
5907 * LOCKING:
5908 * Inherited from calling layer (may sleep).
5909 */
5910 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5911 {
5912 struct ata_host *host;
5913 size_t sz;
5914 int i;
5915
5916 DPRINTK("ENTER\n");
5917
5918 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5919 return NULL;
5920
5921 /* alloc a container for our list of ATA ports (buses) */
5922 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5924 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5925 if (!host)
5926 goto err_out;
5927
5928 devres_add(dev, host);
5929 dev_set_drvdata(dev, host);
5930
5931 spin_lock_init(&host->lock);
5932 mutex_init(&host->eh_mutex);
5933 host->dev = dev;
5934 host->n_ports = max_ports;
5935
5936 /* allocate ports bound to this host */
5937 for (i = 0; i < max_ports; i++) {
5938 struct ata_port *ap;
5939
5940 ap = ata_port_alloc(host);
5941 if (!ap)
5942 goto err_out;
5943
5944 ap->port_no = i;
5945 host->ports[i] = ap;
5946 }
5947
5948 devres_remove_group(dev, NULL);
5949 return host;
5950
5951 err_out:
5952 devres_release_group(dev, NULL);
5953 return NULL;
5954 }
5955
5956 /**
5957 * ata_host_alloc_pinfo - alloc host and init with port_info array
5958 * @dev: generic device this host is associated with
5959 * @ppi: array of ATA port_info to initialize host with
5960 * @n_ports: number of ATA ports attached to this host
5961 *
5962 * Allocate ATA host and initialize with info from @ppi. If NULL
5963 * terminated, @ppi may contain fewer entries than @n_ports. The
5964 * last entry will be used for the remaining ports.
5965 *
5966 * RETURNS:
5967 * Allocated ATA host on success, NULL on failure.
5968 *
5969 * LOCKING:
5970 * Inherited from calling layer (may sleep).
5971 */
5972 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5973 const struct ata_port_info * const * ppi,
5974 int n_ports)
5975 {
5976 const struct ata_port_info *pi;
5977 struct ata_host *host;
5978 int i, j;
5979
5980 host = ata_host_alloc(dev, n_ports);
5981 if (!host)
5982 return NULL;
5983
5984 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5985 struct ata_port *ap = host->ports[i];
5986
5987 if (ppi[j])
5988 pi = ppi[j++];
5989
5990 ap->pio_mask = pi->pio_mask;
5991 ap->mwdma_mask = pi->mwdma_mask;
5992 ap->udma_mask = pi->udma_mask;
5993 ap->flags |= pi->flags;
5994 ap->link.flags |= pi->link_flags;
5995 ap->ops = pi->port_ops;
5996
5997 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5998 host->ops = pi->port_ops;
5999 }
6000
6001 return host;
6002 }
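
/*
 * Illustrative sketch only (not part of libata): a typical LLD builds a
 * NULL-terminated port_info array and allocates its host from it.  The
 * names my_port_info, my_port_ops and pdev below are placeholders.
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 */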
6003
6004 /**
6005 * ata_slave_link_init - initialize slave link
6006 * @ap: port to initialize slave link for
6007 *
6008 * Create and initialize slave link for @ap. This enables slave
6009 * link handling on the port.
6010 *
6011 * In libata, a port contains links and a link contains devices.
6012 * There is a single host link, but if a PMP is attached to it,
6013 * there can be multiple fan-out links. On SATA, there's usually
6014 * a single device connected to a link, but PATA and SATA
6015 * controllers emulating a TF based interface can have two - master
6016 * and slave.
6017 *
6018 * However, there are a few controllers which don't fit into this
6019 * abstraction very well - SATA controllers which emulate a TF
6020 * interface with both master and slave devices but also have
6021 * separate SCR register sets for each device. These controllers
6022 * need separate links for physical link handling
6023 * (e.g. onlineness, link speed) but should be treated like a
6024 * traditional M/S controller for everything else (e.g. command
6025 * issue, softreset).
6026 *
6027 * slave_link is libata's way of handling this class of
6028 * controllers without impacting core layer too much. For
6029 * anything other than physical link handling, the default host
6030 * link is used for both master and slave. For physical link
6031 * handling, separate @ap->slave_link is used. All dirty details
6032 * are implemented inside libata core layer. From LLD's POV, the
6033 * only difference is that prereset, hardreset and postreset are
6034 * called once more for the slave link, so the reset sequence
6035 * looks like the following.
6036 *
6037 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
6038 * softreset(M) -> postreset(M) -> postreset(S)
6039 *
6040 * Note that softreset is called only for the master. Softreset
6041 * resets both M/S by definition, so SRST on master should handle
6042 * both (the standard method will work just fine).
6043 *
6044 * LOCKING:
6045 * Should be called before host is registered.
6046 *
6047 * RETURNS:
6048 * 0 on success, -errno on failure.
6049 */
6050 int ata_slave_link_init(struct ata_port *ap)
6051 {
6052 struct ata_link *link;
6053
6054 WARN_ON(ap->slave_link);
6055 WARN_ON(ap->flags & ATA_FLAG_PMP);
6056
6057 link = kzalloc(sizeof(*link), GFP_KERNEL);
6058 if (!link)
6059 return -ENOMEM;
6060
6061 ata_link_init(ap, link, 1);
6062 ap->slave_link = link;
6063 return 0;
6064 }
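
/*
 * Illustrative sketch only: an LLD with per-device SCR registers would
 * enable slave link handling on each port before registering the host;
 * host and rc below are placeholders.
 *
 *	for (i = 0; i < host->n_ports; i++) {
 *		rc = ata_slave_link_init(host->ports[i]);
 *		if (rc)
 *			return rc;
 *	}
 */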
6065
6066 static void ata_host_stop(struct device *gendev, void *res)
6067 {
6068 struct ata_host *host = dev_get_drvdata(gendev);
6069 int i;
6070
6071 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6072
6073 for (i = 0; i < host->n_ports; i++) {
6074 struct ata_port *ap = host->ports[i];
6075
6076 if (ap->ops->port_stop)
6077 ap->ops->port_stop(ap);
6078 }
6079
6080 if (host->ops->host_stop)
6081 host->ops->host_stop(host);
6082 }
6083
6084 /**
6085 * ata_finalize_port_ops - finalize ata_port_operations
6086 * @ops: ata_port_operations to finalize
6087 *
6088 * An ata_port_operations can inherit from another ops and that
6089 * ops can again inherit from another. This can go on as many
6090 * times as necessary as long as there is no loop in the
6091 * inheritance chain.
6092 *
6093 * Ops tables are finalized when the host is started. NULL or
6094 * unspecified entries are inherited from the closest ancestor
6095 * which has the method, and the entry is populated with it.
6096 * After finalization, the ops table directly points to all the
6097 * methods and ->inherits is no longer necessary and cleared.
6098 *
6099 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
6100 *
6101 * LOCKING:
6102 * None.
6103 */
6104 static void ata_finalize_port_ops(struct ata_port_operations *ops)
6105 {
6106 static DEFINE_SPINLOCK(lock);
6107 const struct ata_port_operations *cur;
6108 void **begin = (void **)ops;
6109 void **end = (void **)&ops->inherits;
6110 void **pp;
6111
6112 if (!ops || !ops->inherits)
6113 return;
6114
6115 spin_lock(&lock);
6116
6117 for (cur = ops->inherits; cur; cur = cur->inherits) {
6118 void **inherit = (void **)cur;
6119
6120 for (pp = begin; pp < end; pp++, inherit++)
6121 if (!*pp)
6122 *pp = *inherit;
6123 }
6124
6125 for (pp = begin; pp < end; pp++)
6126 if (IS_ERR(*pp))
6127 *pp = NULL;
6128
6129 ops->inherits = NULL;
6130
6131 spin_unlock(&lock);
6132 }
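
/*
 * Illustrative sketch only of the inheritance resolved above: an LLD ops
 * table inheriting from sata_port_ops, overriding one method and forcing
 * another back to NULL with ATA_OP_NULL.  my_ops and my_hardreset are
 * placeholders.
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.softreset	= ATA_OP_NULL,
 *	};
 */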
6133
6134 /**
6135 * ata_host_start - start and freeze ports of an ATA host
6136 * @host: ATA host to start ports for
6137 *
6138 * Start and then freeze ports of @host. Started status is
6139 * recorded in host->flags, so this function can be called
6140 * multiple times. Ports are guaranteed to get started only
6141 * once. If host->ops isn't initialized yet, it's set to the
6142 * first non-dummy port ops.
6143 *
6144 * LOCKING:
6145 * Inherited from calling layer (may sleep).
6146 *
6147 * RETURNS:
6148 * 0 if all ports are started successfully, -errno otherwise.
6149 */
6150 int ata_host_start(struct ata_host *host)
6151 {
6152 int have_stop = 0;
6153 void *start_dr = NULL;
6154 int i, rc;
6155
6156 if (host->flags & ATA_HOST_STARTED)
6157 return 0;
6158
6159 ata_finalize_port_ops(host->ops);
6160
6161 for (i = 0; i < host->n_ports; i++) {
6162 struct ata_port *ap = host->ports[i];
6163
6164 ata_finalize_port_ops(ap->ops);
6165
6166 if (!host->ops && !ata_port_is_dummy(ap))
6167 host->ops = ap->ops;
6168
6169 if (ap->ops->port_stop)
6170 have_stop = 1;
6171 }
6172
6173 if (host->ops->host_stop)
6174 have_stop = 1;
6175
6176 if (have_stop) {
6177 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6178 if (!start_dr)
6179 return -ENOMEM;
6180 }
6181
6182 for (i = 0; i < host->n_ports; i++) {
6183 struct ata_port *ap = host->ports[i];
6184
6185 if (ap->ops->port_start) {
6186 rc = ap->ops->port_start(ap);
6187 if (rc) {
6188 if (rc != -ENODEV)
6189 dev_err(host->dev,
6190 "failed to start port %d (errno=%d)\n",
6191 i, rc);
6192 goto err_out;
6193 }
6194 }
6195 ata_eh_freeze_port(ap);
6196 }
6197
6198 if (start_dr)
6199 devres_add(host->dev, start_dr);
6200 host->flags |= ATA_HOST_STARTED;
6201 return 0;
6202
6203 err_out:
6204 while (--i >= 0) {
6205 struct ata_port *ap = host->ports[i];
6206
6207 if (ap->ops->port_stop)
6208 ap->ops->port_stop(ap);
6209 }
6210 devres_free(start_dr);
6211 return rc;
6212 }
6213
6214 /**
6215 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
6216 * @host: host to initialize
6217 * @dev: device host is attached to
6218 * @ops: port_ops
6219 *
6220 */
6221 void ata_host_init(struct ata_host *host, struct device *dev,
6222 struct ata_port_operations *ops)
6223 {
6224 spin_lock_init(&host->lock);
6225 mutex_init(&host->eh_mutex);
6226 host->n_tags = ATA_MAX_QUEUE - 1;
6227 host->dev = dev;
6228 host->ops = ops;
6229 }
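
/*
 * Illustrative sketch only: a SAS-type LLD embeds its own ata_host and
 * initializes it directly instead of going through ata_host_alloc();
 * sas_host, pdev and my_sata_ops are placeholders.
 *
 *	ata_host_init(&sas_host, &pdev->dev, &my_sata_ops);
 */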
6230
6231 void __ata_port_probe(struct ata_port *ap)
6232 {
6233 struct ata_eh_info *ehi = &ap->link.eh_info;
6234 unsigned long flags;
6235
6236 /* kick EH for boot probing */
6237 spin_lock_irqsave(ap->lock, flags);
6238
6239 ehi->probe_mask |= ATA_ALL_DEVICES;
6240 ehi->action |= ATA_EH_RESET;
6241 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6242
6243 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6244 ap->pflags |= ATA_PFLAG_LOADING;
6245 ata_port_schedule_eh(ap);
6246
6247 spin_unlock_irqrestore(ap->lock, flags);
6248 }
6249
6250 int ata_port_probe(struct ata_port *ap)
6251 {
6252 int rc = 0;
6253
6254 if (ap->ops->error_handler) {
6255 __ata_port_probe(ap);
6256 ata_port_wait_eh(ap);
6257 } else {
6258 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6259 rc = ata_bus_probe(ap);
6260 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6261 }
6262 return rc;
6263 }
6264
6265
6266 static void async_port_probe(void *data, async_cookie_t cookie)
6267 {
6268 struct ata_port *ap = data;
6269
6270 /*
6271 * If we're not allowed to scan this host in parallel,
6272 * we need to wait until all previous scans have completed
6273 * before going further.
6274 * Jeff Garzik says this is only within a controller, so we
6275 * don't need to wait for port 0, only for later ports.
6276 */
6277 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6278 async_synchronize_cookie(cookie);
6279
6280 (void)ata_port_probe(ap);
6281
6282 /* in order to keep device order, we need to synchronize at this point */
6283 async_synchronize_cookie(cookie);
6284
6285 ata_scsi_scan_host(ap, 1);
6286 }
6287
6288 /**
6289 * ata_host_register - register initialized ATA host
6290 * @host: ATA host to register
6291 * @sht: template for SCSI host
6292 *
6293 * Register an initialized and started ATA host. @host is allocated
6294 * using ata_host_alloc(), fully initialized by the LLD and started
6295 * with ata_host_start(). This function registers @host with the ATA
6296 * and SCSI layers and probes the attached devices.
6297 *
6298 * LOCKING:
6299 * Inherited from calling layer (may sleep).
6300 *
6301 * RETURNS:
6302 * 0 on success, -errno otherwise.
6303 */
6304 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6305 {
6306 int i, rc;
6307
6308 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6309
6310 /* host must have been started */
6311 if (!(host->flags & ATA_HOST_STARTED)) {
6312 dev_err(host->dev, "BUG: trying to register unstarted host\n");
6313 WARN_ON(1);
6314 return -EINVAL;
6315 }
6316
6317 /* Blow away unused ports. This happens when LLD can't
6318 * determine the exact number of ports to allocate at
6319 * allocation time.
6320 */
6321 for (i = host->n_ports; host->ports[i]; i++)
6322 kfree(host->ports[i]);
6323
6324 /* give ports names and add SCSI hosts */
6325 for (i = 0; i < host->n_ports; i++) {
6326 host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6327 host->ports[i]->local_port_no = i + 1;
6328 }
6329
6330 /* Create associated sysfs transport objects */
6331 for (i = 0; i < host->n_ports; i++) {
6332 rc = ata_tport_add(host->dev, host->ports[i]);
6333 if (rc) {
6334 goto err_tadd;
6335 }
6336 }
6337
6338 rc = ata_scsi_add_hosts(host, sht);
6339 if (rc)
6340 goto err_tadd;
6341
6342 /* set cable, sata_spd_limit and report */
6343 for (i = 0; i < host->n_ports; i++) {
6344 struct ata_port *ap = host->ports[i];
6345 unsigned long xfer_mask;
6346
6347 /* set SATA cable type if still unset */
6348 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6349 ap->cbl = ATA_CBL_SATA;
6350
6351 /* init sata_spd_limit to the current value */
6352 sata_link_init_spd(&ap->link);
6353 if (ap->slave_link)
6354 sata_link_init_spd(ap->slave_link);
6355
6356 /* print per-port info to dmesg */
6357 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6358 ap->udma_mask);
6359
6360 if (!ata_port_is_dummy(ap)) {
6361 ata_port_info(ap, "%cATA max %s %s\n",
6362 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6363 ata_mode_string(xfer_mask),
6364 ap->link.eh_info.desc);
6365 ata_ehi_clear_desc(&ap->link.eh_info);
6366 } else
6367 ata_port_info(ap, "DUMMY\n");
6368 }
6369
6370 /* perform each probe asynchronously */
6371 for (i = 0; i < host->n_ports; i++) {
6372 struct ata_port *ap = host->ports[i];
6373 async_schedule(async_port_probe, ap);
6374 }
6375
6376 return 0;
6377
6378 err_tadd:
6379 while (--i >= 0) {
6380 ata_tport_delete(host->ports[i]);
6381 }
6382 return rc;
6383
6384 }
6385
6386 /**
6387 * ata_host_activate - start host, request IRQ and register it
6388 * @host: target ATA host
6389 * @irq: IRQ to request
6390 * @irq_handler: irq_handler used when requesting IRQ
6391 * @irq_flags: irq_flags used when requesting IRQ
6392 * @sht: scsi_host_template to use when registering the host
6393 *
6394 * After allocating an ATA host and initializing it, most libata
6395 * LLDs perform three steps to activate the host - start host,
6396 * request IRQ and register it. This helper takes necessary
6397 * arguments and performs the three steps in one go.
6398 *
6399 * An invalid IRQ skips the IRQ registration and expects the host to
6400 * have set polling mode on the port. In this case, @irq_handler
6401 * should be NULL.
6402 *
6403 * LOCKING:
6404 * Inherited from calling layer (may sleep).
6405 *
6406 * RETURNS:
6407 * 0 on success, -errno otherwise.
6408 */
6409 int ata_host_activate(struct ata_host *host, int irq,
6410 irq_handler_t irq_handler, unsigned long irq_flags,
6411 struct scsi_host_template *sht)
6412 {
6413 int i, rc;
6414 char *irq_desc;
6415
6416 rc = ata_host_start(host);
6417 if (rc)
6418 return rc;
6419
6420 /* Special case for polling mode */
6421 if (!irq) {
6422 WARN_ON(irq_handler);
6423 return ata_host_register(host, sht);
6424 }
6425
6426 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6427 dev_driver_string(host->dev),
6428 dev_name(host->dev));
6429 if (!irq_desc)
6430 return -ENOMEM;
6431
6432 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6433 irq_desc, host);
6434 if (rc)
6435 return rc;
6436
6437 for (i = 0; i < host->n_ports; i++)
6438 ata_port_desc(host->ports[i], "irq %d", irq);
6439
6440 rc = ata_host_register(host, sht);
6441 /* if failed, just free the IRQ and leave ports alone */
6442 if (rc)
6443 devm_free_irq(host->dev, irq, host);
6444
6445 return rc;
6446 }
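
/*
 * Illustrative sketch only (placeholder names): after allocation and
 * initialization, a PCI LLD typically finishes its probe with a single
 * ata_host_activate() call; passing irq == 0 instead registers a
 * polling-only host.
 *
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */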
6447
6448 /**
6449 * ata_port_detach - Detach ATA port in preparation of device removal
6450 * @ap: ATA port to be detached
6451 *
6452 * Detach all ATA devices and the associated SCSI devices of @ap;
6453 * then, remove the associated SCSI host. @ap is guaranteed to
6454 * be quiescent on return from this function.
6455 *
6456 * LOCKING:
6457 * Kernel thread context (may sleep).
6458 */
6459 static void ata_port_detach(struct ata_port *ap)
6460 {
6461 unsigned long flags;
6462 struct ata_link *link;
6463 struct ata_device *dev;
6464
6465 if (!ap->ops->error_handler)
6466 goto skip_eh;
6467
6468 /* tell EH we're leaving & flush EH */
6469 spin_lock_irqsave(ap->lock, flags);
6470 ap->pflags |= ATA_PFLAG_UNLOADING;
6471 ata_port_schedule_eh(ap);
6472 spin_unlock_irqrestore(ap->lock, flags);
6473
6474 /* wait till EH commits suicide */
6475 ata_port_wait_eh(ap);
6476
6477 /* it better be dead now */
6478 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6479
6480 cancel_delayed_work_sync(&ap->hotplug_task);
6481
6482 skip_eh:
6483 /* clean up zpodd on port removal */
6484 ata_for_each_link(link, ap, HOST_FIRST) {
6485 ata_for_each_dev(dev, link, ALL) {
6486 if (zpodd_dev_enabled(dev))
6487 zpodd_exit(dev);
6488 }
6489 }
6490 if (ap->pmp_link) {
6491 int i;
6492 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6493 ata_tlink_delete(&ap->pmp_link[i]);
6494 }
6495 /* remove the associated SCSI host */
6496 scsi_remove_host(ap->scsi_host);
6497 ata_tport_delete(ap);
6498 }
6499
6500 /**
6501 * ata_host_detach - Detach all ports of an ATA host
6502 * @host: Host to detach
6503 *
6504 * Detach all ports of @host.
6505 *
6506 * LOCKING:
6507 * Kernel thread context (may sleep).
6508 */
6509 void ata_host_detach(struct ata_host *host)
6510 {
6511 int i;
6512
6513 for (i = 0; i < host->n_ports; i++)
6514 ata_port_detach(host->ports[i]);
6515
6516 /* the host is dead now, dissociate ACPI */
6517 ata_acpi_dissociate(host);
6518 }
6519
6520 #ifdef CONFIG_PCI
6521
6522 /**
6523 * ata_pci_remove_one - PCI layer callback for device removal
6524 * @pdev: PCI device that was removed
6525 *
6526 * PCI layer indicates to libata via this hook that hot-unplug or
6527 * module unload event has occurred. Detach all ports. Resource
6528 * release is handled via devres.
6529 *
6530 * LOCKING:
6531 * Inherited from PCI layer (may sleep).
6532 */
6533 void ata_pci_remove_one(struct pci_dev *pdev)
6534 {
6535 struct ata_host *host = pci_get_drvdata(pdev);
6536
6537 ata_host_detach(host);
6538 }
6539
6540 /* move to PCI subsystem */
6541 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6542 {
6543 unsigned long tmp = 0;
6544
6545 switch (bits->width) {
6546 case 1: {
6547 u8 tmp8 = 0;
6548 pci_read_config_byte(pdev, bits->reg, &tmp8);
6549 tmp = tmp8;
6550 break;
6551 }
6552 case 2: {
6553 u16 tmp16 = 0;
6554 pci_read_config_word(pdev, bits->reg, &tmp16);
6555 tmp = tmp16;
6556 break;
6557 }
6558 case 4: {
6559 u32 tmp32 = 0;
6560 pci_read_config_dword(pdev, bits->reg, &tmp32);
6561 tmp = tmp32;
6562 break;
6563 }
6564
6565 default:
6566 return -EINVAL;
6567 }
6568
6569 tmp &= bits->mask;
6570
6571 return (tmp == bits->val) ? 1 : 0;
6572 }
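
/*
 * Illustrative sketch only: PATA drivers commonly use this helper from a
 * prereset hook to check whether their channel is enabled in PCI config
 * space.  The offsets and masks in my_enable_bits are placeholders, not
 * real hardware values.
 *
 *	static const struct pci_bits my_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits[ap->port_no]))
 *		return -ENOENT;
 */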
6573
6574 #ifdef CONFIG_PM
6575 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6576 {
6577 pci_save_state(pdev);
6578 pci_disable_device(pdev);
6579
6580 if (mesg.event & PM_EVENT_SLEEP)
6581 pci_set_power_state(pdev, PCI_D3hot);
6582 }
6583
6584 int ata_pci_device_do_resume(struct pci_dev *pdev)
6585 {
6586 int rc;
6587
6588 pci_set_power_state(pdev, PCI_D0);
6589 pci_restore_state(pdev);
6590
6591 rc = pcim_enable_device(pdev);
6592 if (rc) {
6593 dev_err(&pdev->dev,
6594 "failed to enable device after resume (%d)\n", rc);
6595 return rc;
6596 }
6597
6598 pci_set_master(pdev);
6599 return 0;
6600 }
6601
6602 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6603 {
6604 struct ata_host *host = pci_get_drvdata(pdev);
6605 int rc = 0;
6606
6607 rc = ata_host_suspend(host, mesg);
6608 if (rc)
6609 return rc;
6610
6611 ata_pci_device_do_suspend(pdev, mesg);
6612
6613 return 0;
6614 }
6615
6616 int ata_pci_device_resume(struct pci_dev *pdev)
6617 {
6618 struct ata_host *host = pci_get_drvdata(pdev);
6619 int rc;
6620
6621 rc = ata_pci_device_do_resume(pdev);
6622 if (rc == 0)
6623 ata_host_resume(host);
6624 return rc;
6625 }
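
/*
 * Illustrative sketch only (placeholder names): drivers that need no
 * extra suspend/resume work can wire these helpers straight into their
 * pci_driver.
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= "my_ata",
 *		.id_table	= my_pci_ids,
 *		.probe		= my_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */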
6626 #endif /* CONFIG_PM */
6627
6628 #endif /* CONFIG_PCI */
6629
6630 /**
6631 * ata_platform_remove_one - Platform layer callback for device removal
6632 * @pdev: Platform device that was removed
6633 *
6634 * Platform layer indicates to libata via this hook that hot-unplug or
6635 * module unload event has occurred. Detach all ports. Resource
6636 * release is handled via devres.
6637 *
6638 * LOCKING:
6639 * Inherited from platform layer (may sleep).
6640 */
6641 int ata_platform_remove_one(struct platform_device *pdev)
6642 {
6643 struct ata_host *host = platform_get_drvdata(pdev);
6644
6645 ata_host_detach(host);
6646
6647 return 0;
6648 }
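
/*
 * Illustrative sketch only (placeholder names): the helper has the right
 * signature to be used directly as a platform_driver remove callback.
 *
 *	static struct platform_driver my_ata_driver = {
 *		.probe	= my_ata_probe,
 *		.remove	= ata_platform_remove_one,
 *		.driver	= { .name = "my_ata" },
 *	};
 */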
6649
6650 static int __init ata_parse_force_one(char **cur,
6651 struct ata_force_ent *force_ent,
6652 const char **reason)
6653 {
6654 static const struct ata_force_param force_tbl[] __initconst = {
6655 { "40c", .cbl = ATA_CBL_PATA40 },
6656 { "80c", .cbl = ATA_CBL_PATA80 },
6657 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6658 { "unk", .cbl = ATA_CBL_PATA_UNK },
6659 { "ign", .cbl = ATA_CBL_PATA_IGN },
6660 { "sata", .cbl = ATA_CBL_SATA },
6661 { "1.5Gbps", .spd_limit = 1 },
6662 { "3.0Gbps", .spd_limit = 2 },
6663 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6664 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6665 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
6666 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
6667 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6668 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6669 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6670 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6671 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6672 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6673 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6674 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6675 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6676 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6677 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6678 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6679 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6680 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6681 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6682 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6683 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6684 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6685 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6686 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6687 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6688 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6689 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6690 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6691 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6692 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6693 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6694 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6695 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6696 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6697 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6698 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6699 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6700 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6701 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6702 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6703 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6704 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6705 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6706 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6707 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6708 };
6709 char *start = *cur, *p = *cur;
6710 char *id, *val, *endp;
6711 const struct ata_force_param *match_fp = NULL;
6712 int nr_matches = 0, i;
6713
6714 /* find where this param ends and update *cur */
6715 while (*p != '\0' && *p != ',')
6716 p++;
6717
6718 if (*p == '\0')
6719 *cur = p;
6720 else
6721 *cur = p + 1;
6722
6723 *p = '\0';
6724
6725 /* parse */
6726 p = strchr(start, ':');
6727 if (!p) {
6728 val = strstrip(start);
6729 goto parse_val;
6730 }
6731 *p = '\0';
6732
6733 id = strstrip(start);
6734 val = strstrip(p + 1);
6735
6736 /* parse id */
6737 p = strchr(id, '.');
6738 if (p) {
6739 *p++ = '\0';
6740 force_ent->device = simple_strtoul(p, &endp, 10);
6741 if (p == endp || *endp != '\0') {
6742 *reason = "invalid device";
6743 return -EINVAL;
6744 }
6745 }
6746
6747 force_ent->port = simple_strtoul(id, &endp, 10);
6748 if (p == endp || *endp != '\0') {
6749 *reason = "invalid port/link";
6750 return -EINVAL;
6751 }
6752
6753 parse_val:
6754 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6755 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6756 const struct ata_force_param *fp = &force_tbl[i];
6757
6758 if (strncasecmp(val, fp->name, strlen(val)))
6759 continue;
6760
6761 nr_matches++;
6762 match_fp = fp;
6763
6764 if (strcasecmp(val, fp->name) == 0) {
6765 nr_matches = 1;
6766 break;
6767 }
6768 }
6769
6770 if (!nr_matches) {
6771 *reason = "unknown value";
6772 return -EINVAL;
6773 }
6774 if (nr_matches > 1) {
6775 *reason = "ambigious value";
6776 return -EINVAL;
6777 }
6778
6779 force_ent->param = *match_fp;
6780
6781 return 0;
6782 }
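
/*
 * Illustrative examples of the libata.force syntax parsed above; each
 * comma-separated entry is "[PORT[.DEVICE]:]VALUE" and, without an ID,
 * applies globally (or to the most recently named ID):
 *
 *	libata.force=1.5Gbps		limit all links to 1.5Gbps
 *	libata.force=2:noncq		disable NCQ on port 2
 *	libata.force=1.00:disable	disable device 0 on port 1
 */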
6783
6784 static void __init ata_parse_force_param(void)
6785 {
6786 int idx = 0, size = 1;
6787 int last_port = -1, last_device = -1;
6788 char *p, *cur, *next;
6789
6790 /* calculate maximum number of params and allocate force_tbl */
6791 for (p = ata_force_param_buf; *p; p++)
6792 if (*p == ',')
6793 size++;
6794
6795 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6796 if (!ata_force_tbl) {
6797 printk(KERN_WARNING "ata: failed to extend force table, "
6798 "libata.force ignored\n");
6799 return;
6800 }
6801
6802 /* parse and populate the table */
6803 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6804 const char *reason = "";
6805 struct ata_force_ent te = { .port = -1, .device = -1 };
6806
6807 next = cur;
6808 if (ata_parse_force_one(&next, &te, &reason)) {
6809 printk(KERN_WARNING "ata: failed to parse force "
6810 "parameter \"%s\" (%s)\n",
6811 cur, reason);
6812 continue;
6813 }
6814
6815 if (te.port == -1) {
6816 te.port = last_port;
6817 te.device = last_device;
6818 }
6819
6820 ata_force_tbl[idx++] = te;
6821
6822 last_port = te.port;
6823 last_device = te.device;
6824 }
6825
6826 ata_force_tbl_size = idx;
6827 }
6828
6829 static int __init ata_init(void)
6830 {
6831 int rc;
6832
6833 ata_parse_force_param();
6834
6835 rc = ata_sff_init();
6836 if (rc) {
6837 kfree(ata_force_tbl);
6838 return rc;
6839 }
6840
6841 libata_transport_init();
6842 ata_scsi_transport_template = ata_attach_transport();
6843 if (!ata_scsi_transport_template) {
6844 ata_sff_exit();
6845 rc = -ENOMEM;
6846 goto err_out;
6847 }
6848
6849 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6850 return 0;
6851
6852 err_out:
6853 return rc;
6854 }
6855
6856 static void __exit ata_exit(void)
6857 {
6858 ata_release_transport(ata_scsi_transport_template);
6859 libata_transport_exit();
6860 ata_sff_exit();
6861 kfree(ata_force_tbl);
6862 }
6863
6864 subsys_initcall(ata_init);
6865 module_exit(ata_exit);
6866
6867 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6868
6869 int ata_ratelimit(void)
6870 {
6871 return __ratelimit(&ratelimit);
6872 }
6873
6874 /**
6875 * ata_msleep - ATA EH owner aware msleep
6876 * @ap: ATA port to attribute the sleep to
6877 * @msecs: duration to sleep in milliseconds
6878 *
6879 * Sleeps @msecs. If the current task is the owner of @ap's EH, the
6880 * ownership is released before going to sleep and reacquired
6881 * after the sleep is complete. IOW, other ports sharing the
6882 * @ap->host will be allowed to own the EH while this task is
6883 * sleeping.
6884 *
6885 * LOCKING:
6886 * Might sleep.
6887 */
6888 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6889 {
6890 bool owns_eh = ap && ap->host->eh_owner == current;
6891
6892 if (owns_eh)
6893 ata_eh_release(ap);
6894
6895 if (msecs < 20) {
6896 unsigned long usecs = msecs * USEC_PER_MSEC;
6897 usleep_range(usecs, usecs + 50);
6898 } else {
6899 msleep(msecs);
6900 }
6901
6902 if (owns_eh)
6903 ata_eh_acquire(ap);
6904 }
6905
6906 /**
6907 * ata_wait_register - wait until register value changes
6908 * @ap: ATA port to wait register for, can be NULL
6909 * @reg: IO-mapped register
6910 * @mask: Mask to apply to read register value
6911 * @val: Wait condition
6912 * @interval: polling interval in milliseconds
6913 * @timeout: timeout in milliseconds
6914 *
6915 * Waiting for some bits of a register to change is a common
6916 * operation for ATA controllers. This function reads the 32-bit LE
6917 * IO-mapped register @reg and tests for the following condition.
6918 *
6919 * (ioread32(@reg) & @mask) != @val
6920 *
6921 * If the condition is met, it returns; otherwise, the read is
6922 * repeated every @interval milliseconds until @timeout expires.
6923 *
6924 * LOCKING:
6925 * Kernel thread context (may sleep)
6926 *
6927 * RETURNS:
6928 * The final register value.
6929 */
6930 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6931 unsigned long interval, unsigned long timeout)
6932 {
6933 unsigned long deadline;
6934 u32 tmp;
6935
6936 tmp = ioread32(reg);
6937
6938 /* Calculate timeout _after_ the first read to make sure
6939 * preceding writes reach the controller before starting to
6940 * eat away the timeout.
6941 */
6942 deadline = ata_deadline(jiffies, timeout);
6943
6944 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6945 ata_msleep(ap, interval);
6946 tmp = ioread32(reg);
6947 }
6948
6949 return tmp;
6950 }
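
/*
 * Illustrative sketch only (MY_STATUS_REG, MY_BUSY_BIT and mmio are
 * placeholders): waiting for a controller to clear a busy bit, polling
 * every 10ms for up to 1s.
 *
 *	u32 status = ata_wait_register(ap, mmio + MY_STATUS_REG,
 *				       MY_BUSY_BIT, MY_BUSY_BIT, 10, 1000);
 *	if (status & MY_BUSY_BIT)
 *		return -EBUSY;
 */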
6951
6952 /**
6953 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
6954 * @link: Link receiving the event
6955 *
6956 * Test whether the received PHY event has to be ignored or not.
6957 *
6958 * LOCKING:
6959 * None.
6960 *
6961 * RETURNS:
6962 * True if the event has to be ignored.
6963 */
6964 bool sata_lpm_ignore_phy_events(struct ata_link *link)
6965 {
6966 unsigned long lpm_timeout = link->last_lpm_change +
6967 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
6968
6969 /* if LPM is enabled, PHYRDY doesn't mean anything */
6970 if (link->lpm_policy > ATA_LPM_MAX_POWER)
6971 return true;
6972
6973 /* ignore the first PHY event after the LPM policy changed
6974 * as it might be spurious
6975 */
6976 if ((link->flags & ATA_LFLAG_CHANGED) &&
6977 time_before(jiffies, lpm_timeout))
6978 return true;
6979
6980 return false;
6981 }
6982 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
6983
6984 /*
6985 * Dummy port_ops
6986 */
6987 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6988 {
6989 return AC_ERR_SYSTEM;
6990 }
6991
6992 static void ata_dummy_error_handler(struct ata_port *ap)
6993 {
6994 /* truly dummy */
6995 }
6996
6997 struct ata_port_operations ata_dummy_port_ops = {
6998 .qc_prep = ata_noop_qc_prep,
6999 .qc_issue = ata_dummy_qc_issue,
7000 .error_handler = ata_dummy_error_handler,
7001 .sched_eh = ata_std_sched_eh,
7002 .end_eh = ata_std_end_eh,
7003 };
7004
7005 const struct ata_port_info ata_dummy_port_info = {
7006 .port_ops = &ata_dummy_port_ops,
7007 };
7008
7009 /*
7010 * Utility print functions
7011 */
7012 void ata_port_printk(const struct ata_port *ap, const char *level,
7013 const char *fmt, ...)
7014 {
7015 struct va_format vaf;
7016 va_list args;
7017
7018 va_start(args, fmt);
7019
7020 vaf.fmt = fmt;
7021 vaf.va = &args;
7022
7023 printk("%sata%u: %pV", level, ap->print_id, &vaf);
7024
7025 va_end(args);
7026 }
7027 EXPORT_SYMBOL(ata_port_printk);
7028
7029 void ata_link_printk(const struct ata_link *link, const char *level,
7030 const char *fmt, ...)
7031 {
7032 struct va_format vaf;
7033 va_list args;
7034
7035 va_start(args, fmt);
7036
7037 vaf.fmt = fmt;
7038 vaf.va = &args;
7039
7040 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
7041 printk("%sata%u.%02u: %pV",
7042 level, link->ap->print_id, link->pmp, &vaf);
7043 else
7044 printk("%sata%u: %pV",
7045 level, link->ap->print_id, &vaf);
7046
7047 va_end(args);
7048 }
7049 EXPORT_SYMBOL(ata_link_printk);
7050
7051 void ata_dev_printk(const struct ata_device *dev, const char *level,
7052 const char *fmt, ...)
7053 {
7054 struct va_format vaf;
7055 va_list args;
7056
7057 va_start(args, fmt);
7058
7059 vaf.fmt = fmt;
7060 vaf.va = &args;
7061
7062 printk("%sata%u.%02u: %pV",
7063 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
7064 &vaf);
7065
7066 va_end(args);
7067 }
7068 EXPORT_SYMBOL(ata_dev_printk);
7069
7070 void ata_print_version(const struct device *dev, const char *version)
7071 {
7072 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
7073 }
7074 EXPORT_SYMBOL(ata_print_version);
7075
7076 /*
7077 * libata is essentially a library of internal helper functions for
7078 * low-level ATA host controller drivers. As such, the API/ABI is
7079 * likely to change as new drivers are added and updated.
7080 * Do not depend on ABI/API stability.
7081 */
7082 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7083 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7084 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7085 EXPORT_SYMBOL_GPL(ata_base_port_ops);
7086 EXPORT_SYMBOL_GPL(sata_port_ops);
7087 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7088 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7089 EXPORT_SYMBOL_GPL(ata_link_next);
7090 EXPORT_SYMBOL_GPL(ata_dev_next);
7091 EXPORT_SYMBOL_GPL(ata_std_bios_param);
7092 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
7093 EXPORT_SYMBOL_GPL(ata_host_init);
7094 EXPORT_SYMBOL_GPL(ata_host_alloc);
7095 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7096 EXPORT_SYMBOL_GPL(ata_slave_link_init);
7097 EXPORT_SYMBOL_GPL(ata_host_start);
7098 EXPORT_SYMBOL_GPL(ata_host_register);
7099 EXPORT_SYMBOL_GPL(ata_host_activate);
7100 EXPORT_SYMBOL_GPL(ata_host_detach);
7101 EXPORT_SYMBOL_GPL(ata_sg_init);
7102 EXPORT_SYMBOL_GPL(ata_qc_complete);
7103 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7104 EXPORT_SYMBOL_GPL(atapi_cmd_type);
7105 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7106 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7107 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7108 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7109 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7110 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7111 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7112 EXPORT_SYMBOL_GPL(ata_mode_string);
7113 EXPORT_SYMBOL_GPL(ata_id_xfermask);
7114 EXPORT_SYMBOL_GPL(ata_do_set_mode);
7115 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7116 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7117 EXPORT_SYMBOL_GPL(ata_dev_disable);
7118 EXPORT_SYMBOL_GPL(sata_set_spd);
7119 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7120 EXPORT_SYMBOL_GPL(sata_link_debounce);
7121 EXPORT_SYMBOL_GPL(sata_link_resume);
7122 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
7123 EXPORT_SYMBOL_GPL(ata_std_prereset);
7124 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7125 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7126 EXPORT_SYMBOL_GPL(ata_std_postreset);
7127 EXPORT_SYMBOL_GPL(ata_dev_classify);
7128 EXPORT_SYMBOL_GPL(ata_dev_pair);
7129 EXPORT_SYMBOL_GPL(ata_ratelimit);
7130 EXPORT_SYMBOL_GPL(ata_msleep);
7131 EXPORT_SYMBOL_GPL(ata_wait_register);
7132 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7133 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7134 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7135 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7136 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
7137 EXPORT_SYMBOL_GPL(sata_scr_valid);
7138 EXPORT_SYMBOL_GPL(sata_scr_read);
7139 EXPORT_SYMBOL_GPL(sata_scr_write);
7140 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7141 EXPORT_SYMBOL_GPL(ata_link_online);
7142 EXPORT_SYMBOL_GPL(ata_link_offline);
7143 #ifdef CONFIG_PM
7144 EXPORT_SYMBOL_GPL(ata_host_suspend);
7145 EXPORT_SYMBOL_GPL(ata_host_resume);
7146 #endif /* CONFIG_PM */
7147 EXPORT_SYMBOL_GPL(ata_id_string);
7148 EXPORT_SYMBOL_GPL(ata_id_c_string);
7149 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
7150 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7151
7152 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7153 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7154 EXPORT_SYMBOL_GPL(ata_timing_compute);
7155 EXPORT_SYMBOL_GPL(ata_timing_merge);
7156 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7157
7158 #ifdef CONFIG_PCI
7159 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7160 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7161 #ifdef CONFIG_PM
7162 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7163 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7164 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7165 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7166 #endif /* CONFIG_PM */
7167 #endif /* CONFIG_PCI */
7168
7169 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7170
7171 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7172 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7173 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7174 EXPORT_SYMBOL_GPL(ata_port_desc);
7175 #ifdef CONFIG_PCI
7176 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7177 #endif /* CONFIG_PCI */
7178 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7179 EXPORT_SYMBOL_GPL(ata_link_abort);
7180 EXPORT_SYMBOL_GPL(ata_port_abort);
7181 EXPORT_SYMBOL_GPL(ata_port_freeze);
7182 EXPORT_SYMBOL_GPL(sata_async_notification);
7183 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7184 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7185 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7186 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7187 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7188 EXPORT_SYMBOL_GPL(ata_do_eh);
7189 EXPORT_SYMBOL_GPL(ata_std_error_handler);
7190
7191 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7192 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7193 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7194 EXPORT_SYMBOL_GPL(ata_cable_ignore);
7195 EXPORT_SYMBOL_GPL(ata_cable_sata);