/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};
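
/*
 * Illustrative sketch (hypothetical driver): low-level drivers
 * typically build on the ops tables above via .inherits rather
 * than filling in every method themselves, e.g.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *	};
 *
 * where my_port_ops and my_hardreset are made-up names.  Methods
 * left unset are filled in from the ancestor tables when the host
 * is registered.
 */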

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id = ATOMIC_INIT(0);

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
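
/*
 * Example (illustrative): booting with
 *
 *	libata.force=1:noncq,2.00:udma4
 *
 * turns off NCQ for every device on port 1 and limits the first
 * device on port 2 to UDMA/66.  See kernel-parameters.rst for the
 * full grammar.
 */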

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

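/*
 * SStatus bits 3:0 form the DET field; DET == 0x3 means device
 * presence detected and PHY communication established, i.e. the
 * link is online.
 */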
static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
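
/*
 * Example (illustrative): ata_link_next() is normally used through
 * the ata_for_each_link() iterator from <linux/libata.h>:
 *
 *	struct ata_link *link;
 *
 *	ata_for_each_link(link, ap, EDGE) {
 *		(visits each PMP fan-out link, or the host link when
 *		 no port multiplier is attached)
 *	}
 */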

/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
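
/*
 * Example (illustrative): the usual consumer is the
 * ata_for_each_dev() iterator from <linux/libata.h>:
 *
 *	struct ata_device *dev;
 *
 *	ata_for_each_dev(dev, link, ENABLED) {
 *		(operate on each enabled device on the link)
 *	}
 */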

/**
 *	ata_dev_phys_link - find physical link for a device
 *	@dev: ATA device to look up physical link for
 *
 *	Look up physical link which @dev is attached to.  Note that
 *	this is different from @dev->link only when @dev is on slave
 *	link.  For all other cases, it's the same as @dev->link.
 *
 *	LOCKING:
 *	Don't care.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has a matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link, not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has a slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for a command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}
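
/*
 * For reference (illustrative): the result is a 20-byte Register -
 * Host to Device FIS.  For a command FIS addressed to PMP port 0,
 * the first bytes come out as
 *
 *	fis[0] = 0x27	(FIS type: Register - Host to Device)
 *	fis[1] = 0x80	(C bit set, PMP field 0)
 *	fis[2] = tf->command
 *	fis[3] = tf->feature
 *
 * with the LBA, device, count, control and auxiliary fields
 * following as laid out above.
 */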

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
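
/*
 * Worked example (illustrative): a DMA write with FUA on an
 * LBA48-capable device gives index = 16, fua = 4, lba48 = 2 and
 * write = 1, so ata_rw_cmds[23] selects ATA_CMD_WRITE_FUA_EXT.
 * The zero entries in the table mark combinations with no ATA
 * command, for which -1 is returned.
 */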

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
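
/*
 * Worked example (illustrative): with the classic geometry of 16
 * heads and 63 sectors per track, CHS (cyl 2, head 3, sect 4)
 * maps to (2 * 16 + 3) * 63 + 4 - 1 = block 2208.
 */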

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *	@class: IO priority class
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag, int class)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
			if (class == IOPRIO_CLASS_RT)
				tf->hob_nsect |= ATA_PRIO_HIGH <<
						 ATA_SHIFT_PRIO;
		}
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
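
/*
 * Example (illustrative, assuming the shift values from
 * <linux/ata.h>: PIO at bit 0, MWDMA at bit 7, UDMA at bit 12):
 *
 *	ata_pack_xfermask(0x1f, 0x07, 0x3f)
 *
 * packs PIO0-4, MWDMA0-2 and UDMA0-5 into the single mask 0x3f39f,
 * which ata_unpack_xfermask() below splits apart again.
 */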

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers, so we
	 * only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures for
	 * ATA/ATAPI devices.  Then, the Serial ATA II: Port Multiplier
	 * specification began to use 0x69/0x96 to identify port
	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 shortly afterwards and described them as reserved
	 * for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
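
/*
 * Signature summary (LBA mid / LBA high, as checked above):
 *
 *	0x00 / 0x00	ATA
 *	0x14 / 0xeb	ATAPI
 *	0x69 / 0x96	port multiplier
 *	0x3c / 0xc3	SEMB
 *	0xcd / 0xab	ZAC (zoned device)
 */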

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
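
/*
 * Example (illustrative): pulling the model string out of IDENTIFY
 * data with the helper above:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */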

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;
		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However, it's the speeds, not the modes, that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after a timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.command |= ATA_SENSE;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy - Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* These are cycle times, not frequencies - watch the logic! */
			if (pio > 240)	/* PIO2 is 240 ns per cycle */
1816 return 3 << ATA_SHIFT_PIO;
1817 return 7 << ATA_SHIFT_PIO;
1818 }
1819 }
1820 return 3 << ATA_SHIFT_PIO;
1821 }
1822
1823 /**
1824 * ata_do_dev_read_id - default ID read method
1825 * @dev: device
1826 * @tf: proposed taskfile
1827 * @id: data buffer
1828 *
1829 * Issue the identify taskfile and hand back the buffer containing
1830 * identify data. For some RAID controllers and for pre ATA devices
1831 * this function is wrapped or replaced by the driver
1832 */
1833 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1834 struct ata_taskfile *tf, u16 *id)
1835 {
1836 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1837 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1838 }
1839
1840 /**
1841 * ata_dev_read_id - Read ID data from the specified device
1842 * @dev: target device
1843 * @p_class: pointer to class of the target device (may be changed)
1844 * @flags: ATA_READID_* flags
1845 * @id: buffer to read IDENTIFY data into
1846 *
1847 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1848 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1849 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1850 * for pre-ATA4 drives.
1851 *
1852 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1853 * now we abort if we hit that case.
1854 *
1855 * LOCKING:
1856 * Kernel thread context (may sleep)
1857 *
1858 * RETURNS:
1859 * 0 on success, -errno otherwise.
1860 */
1861 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1862 unsigned int flags, u16 *id)
1863 {
1864 struct ata_port *ap = dev->link->ap;
1865 unsigned int class = *p_class;
1866 struct ata_taskfile tf;
1867 unsigned int err_mask = 0;
1868 const char *reason;
1869 bool is_semb = class == ATA_DEV_SEMB;
1870 int may_fallback = 1, tried_spinup = 0;
1871 int rc;
1872
1873 if (ata_msg_ctl(ap))
1874 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1875
1876 retry:
1877 ata_tf_init(dev, &tf);
1878
1879 switch (class) {
1880 case ATA_DEV_SEMB:
1881 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1882 case ATA_DEV_ATA:
1883 case ATA_DEV_ZAC:
1884 tf.command = ATA_CMD_ID_ATA;
1885 break;
1886 case ATA_DEV_ATAPI:
1887 tf.command = ATA_CMD_ID_ATAPI;
1888 break;
1889 default:
1890 rc = -ENODEV;
1891 reason = "unsupported class";
1892 goto err_out;
1893 }
1894
1895 tf.protocol = ATA_PROT_PIO;
1896
1897 /* Some devices choke if TF registers contain garbage. Make
1898 * sure those are properly initialized.
1899 */
1900 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1901
1902 /* Device presence detection is unreliable on some
1903 * controllers. Always poll IDENTIFY if available.
1904 */
1905 tf.flags |= ATA_TFLAG_POLLING;
1906
1907 if (ap->ops->read_id)
1908 err_mask = ap->ops->read_id(dev, &tf, id);
1909 else
1910 err_mask = ata_do_dev_read_id(dev, &tf, id);
1911
1912 if (err_mask) {
1913 if (err_mask & AC_ERR_NODEV_HINT) {
1914 ata_dev_dbg(dev, "NODEV after polling detection\n");
1915 return -ENOENT;
1916 }
1917
1918 if (is_semb) {
1919 ata_dev_info(dev,
1920 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1921 /* SEMB is not supported yet */
1922 *p_class = ATA_DEV_SEMB_UNSUP;
1923 return 0;
1924 }
1925
1926 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1927 /* Device or controller might have reported
1928 * the wrong device class. Give a shot at the
1929 * other IDENTIFY if the current one is
1930 * aborted by the device.
1931 */
1932 if (may_fallback) {
1933 may_fallback = 0;
1934
1935 if (class == ATA_DEV_ATA)
1936 class = ATA_DEV_ATAPI;
1937 else
1938 class = ATA_DEV_ATA;
1939 goto retry;
1940 }
1941
1942 /* Control reaches here iff the device aborted
1943 * both flavors of IDENTIFYs which happens
1944 * sometimes with phantom devices.
1945 */
1946 ata_dev_dbg(dev,
1947 "both IDENTIFYs aborted, assuming NODEV\n");
1948 return -ENOENT;
1949 }
1950
1951 rc = -EIO;
1952 reason = "I/O error";
1953 goto err_out;
1954 }
1955
1956 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1957 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1958 "class=%d may_fallback=%d tried_spinup=%d\n",
1959 class, may_fallback, tried_spinup);
1960 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1961 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1962 }
1963
1964 /* Falling back doesn't make sense if ID data was read
1965 * successfully at least once.
1966 */
1967 may_fallback = 0;
1968
1969 swap_buf_le16(id, ATA_ID_WORDS);
1970
1971 /* sanity check */
1972 rc = -EINVAL;
1973 reason = "device reports invalid type";
1974
1975 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1976 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1977 goto err_out;
1978 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1979 ata_id_is_ata(id)) {
1980 ata_dev_dbg(dev,
1981 "host indicates ignore ATA devices, ignored\n");
1982 return -ENOENT;
1983 }
1984 } else {
1985 if (ata_id_is_ata(id))
1986 goto err_out;
1987 }
1988
1989 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1990 tried_spinup = 1;
1991 /*
1992 * Drive powered-up in standby mode, and requires a specific
1993 * SET_FEATURES spin-up subcommand before it will accept
1994 * anything other than the original IDENTIFY command.
1995 */
1996 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1997 if (err_mask && id[2] != 0x738c) {
1998 rc = -EIO;
1999 reason = "SPINUP failed";
2000 goto err_out;
2001 }
2002 /*
2003 * If the drive initially returned incomplete IDENTIFY info,
2004 * we now must reissue the IDENTIFY command.
2005 */
2006 if (id[2] == 0x37c8)
2007 goto retry;
2008 }
2009
2010 if ((flags & ATA_READID_POSTRESET) &&
2011 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2012 /*
2013 * The exact sequence expected by certain pre-ATA4 drives is:
2014 * SRST RESET
2015 * IDENTIFY (optional in early ATA)
2016 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2017 * anything else..
2018 * Some drives were very specific about that exact sequence.
2019 *
2020 * Note that ATA4 says LBA is mandatory so the second check
2021 * should never trigger.
2022 */
2023 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2024 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2025 if (err_mask) {
2026 rc = -EIO;
2027 reason = "INIT_DEV_PARAMS failed";
2028 goto err_out;
2029 }
2030
2031 /* current CHS translation info (id[53-58]) might be
2032 * changed. reread the identify device info.
2033 */
2034 flags &= ~ATA_READID_POSTRESET;
2035 goto retry;
2036 }
2037 }
2038
2039 *p_class = class;
2040
2041 return 0;
2042
2043 err_out:
2044 if (ata_msg_warn(ap))
2045 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2046 reason, err_mask);
2047 return rc;
2048 }
2049
2050 static int ata_do_link_spd_horkage(struct ata_device *dev)
2051 {
2052 struct ata_link *plink = ata_dev_phys_link(dev);
2053 u32 target, target_limit;
2054
2055 if (!sata_scr_valid(plink))
2056 return 0;
2057
2058 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2059 target = 1;
2060 else
2061 return 0;
2062
2063 target_limit = (1 << target) - 1;
2064
2065 /* if already on stricter limit, no need to push further */
2066 if (plink->sata_spd_limit <= target_limit)
2067 return 0;
2068
2069 plink->sata_spd_limit = target_limit;
2070
2071 /* Request another EH round by returning -EAGAIN if link is
2072 * going faster than the target speed. Forward progress is
2073 * guaranteed by setting sata_spd_limit to target_limit above.
2074 */
2075 if (plink->sata_spd > target) {
2076 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2077 sata_spd_string(target));
2078 return -EAGAIN;
2079 }
2080 return 0;
2081 }
2082
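/*
 * ata_dev_knobble - detect a bridged PATA device on a SATA port
 *
 * Returns non-zero when the port reports a SATA cable but the device's
 * IDENTIFY data does not claim native SATA, i.e. a PATA drive sitting
 * behind a SATA-PATA bridge.  ata_dev_configure() applies conservative
 * bridge limits (UDMA5, 200 sectors) to such devices unless the
 * blacklist marks the bridge as OK (ATA_HORKAGE_BRIDGE_OK).
 */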
2083 static inline u8 ata_dev_knobble(struct ata_device *dev)
2084 {
2085 struct ata_port *ap = dev->link->ap;
2086
2087 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2088 return 0;
2089
2090 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2091 }
2092
2093 static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2094 {
2095 struct ata_port *ap = dev->link->ap;
2096 unsigned int err_mask;
2097 int log_index = ATA_LOG_NCQ_SEND_RECV * 2;
2098 u16 log_pages;
2099
2100 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2101 0, ap->sector_buf, 1);
2102 if (err_mask) {
2103 ata_dev_dbg(dev,
2104 "failed to get Log Directory Emask 0x%x\n",
2105 err_mask);
2106 return;
2107 }
2108 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2109 if (!log_pages) {
2110 ata_dev_warn(dev,
2111 "NCQ Send/Recv Log not supported\n");
2112 return;
2113 }
2114 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2115 0, ap->sector_buf, 1);
2116 if (err_mask) {
2117 ata_dev_dbg(dev,
2118 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2119 err_mask);
2120 } else {
2121 u8 *cmds = dev->ncq_send_recv_cmds;
2122
2123 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2124 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2125
2126 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2127 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2128 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2129 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2130 }
2131 }
2132 }
2133
2134 static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2135 {
2136 struct ata_port *ap = dev->link->ap;
2137 unsigned int err_mask;
2138 int log_index = ATA_LOG_NCQ_NON_DATA * 2;
2139 u16 log_pages;
2140
2141 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2142 0, ap->sector_buf, 1);
2143 if (err_mask) {
2144 ata_dev_dbg(dev,
2145 "failed to get Log Directory Emask 0x%x\n",
2146 err_mask);
2147 return;
2148 }
2149 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2150 if (!log_pages) {
2151 ata_dev_warn(dev,
2152 "NCQ Send/Recv Log not supported\n");
2153 return;
2154 }
2155 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2156 0, ap->sector_buf, 1);
2157 if (err_mask) {
2158 ata_dev_dbg(dev,
2159 "failed to get NCQ Non-Data Log Emask 0x%x\n",
2160 err_mask);
2161 } else {
2162 u8 *cmds = dev->ncq_non_data_cmds;
2163
2164 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2165 }
2166 }
2167
2168 static void ata_dev_config_ncq_prio(struct ata_device *dev)
2169 {
2170 struct ata_port *ap = dev->link->ap;
2171 unsigned int err_mask;
2172
2173 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2174 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2175 return;
2176 }
2177
2178 err_mask = ata_read_log_page(dev,
2179 ATA_LOG_SATA_ID_DEV_DATA,
2180 ATA_LOG_SATA_SETTINGS,
2181 ap->sector_buf,
2182 1);
2183 if (err_mask) {
2184 ata_dev_dbg(dev,
2185 "failed to get Identify Device data, Emask 0x%x\n",
2186 err_mask);
2187 return;
2188 }
2189
2190 if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
2191 dev->flags |= ATA_DFLAG_NCQ_PRIO;
2192 } else {
2193 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2194 ata_dev_dbg(dev, "SATA page does not support priority\n");
2195 }
2196
2197 }
2198
2199 static int ata_dev_config_ncq(struct ata_device *dev,
2200 char *desc, size_t desc_sz)
2201 {
2202 struct ata_port *ap = dev->link->ap;
2203 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2204 unsigned int err_mask;
2205 char *aa_desc = "";
2206
2207 if (!ata_id_has_ncq(dev->id)) {
2208 desc[0] = '\0';
2209 return 0;
2210 }
2211 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2212 snprintf(desc, desc_sz, "NCQ (not used)");
2213 return 0;
2214 }
2215 if (ap->flags & ATA_FLAG_NCQ) {
2216 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2217 dev->flags |= ATA_DFLAG_NCQ;
2218 }
2219
2220 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2221 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2222 ata_id_has_fpdma_aa(dev->id)) {
2223 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2224 SATA_FPDMA_AA);
2225 if (err_mask) {
2226 ata_dev_err(dev,
2227 "failed to enable AA (error_mask=0x%x)\n",
2228 err_mask);
2229 if (err_mask != AC_ERR_DEV) {
2230 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2231 return -EIO;
2232 }
2233 } else
2234 aa_desc = ", AA";
2235 }
2236
2237 if (hdepth >= ddepth)
2238 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2239 else
2240 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2241 ddepth, aa_desc);
2242
2243 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2244 if (ata_id_has_ncq_send_and_recv(dev->id))
2245 ata_dev_config_ncq_send_recv(dev);
2246 if (ata_id_has_ncq_non_data(dev->id))
2247 ata_dev_config_ncq_non_data(dev);
2248 if (ata_id_has_ncq_prio(dev->id))
2249 ata_dev_config_ncq_prio(dev);
2250 }
2251
2252 return 0;
2253 }
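/*
 * Worked example of the description built above (hypothetical values):
 * a drive advertising queue depth 32 on a host that allows 31 in-flight
 * commands yields "NCQ (depth 31/32)", or "NCQ (depth 31/32), AA" when
 * FPDMA AutoActivate was successfully enabled as well.
 */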
2254
2255 static void ata_dev_config_sense_reporting(struct ata_device *dev)
2256 {
2257 unsigned int err_mask;
2258
2259 if (!ata_id_has_sense_reporting(dev->id))
2260 return;
2261
2262 if (ata_id_sense_reporting_enabled(dev->id))
2263 return;
2264
2265 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2266 if (err_mask) {
2267 ata_dev_dbg(dev,
2268 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2269 err_mask);
2270 }
2271 }
2272
2273 static void ata_dev_config_zac(struct ata_device *dev)
2274 {
2275 struct ata_port *ap = dev->link->ap;
2276 unsigned int err_mask;
2277 u8 *identify_buf = ap->sector_buf;
2278 int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0;
2279 u16 log_pages;
2280
2281 dev->zac_zones_optimal_open = U32_MAX;
2282 dev->zac_zones_optimal_nonseq = U32_MAX;
2283 dev->zac_zones_max_open = U32_MAX;
2284
2285 /*
2286 * Always set the 'ZAC' flag for Host-managed devices.
2287 */
2288 if (dev->class == ATA_DEV_ZAC)
2289 dev->flags |= ATA_DFLAG_ZAC;
2290 else if (ata_id_zoned_cap(dev->id) == 0x01)
2291 /*
2292 * Check for host-aware devices.
2293 */
2294 dev->flags |= ATA_DFLAG_ZAC;
2295
2296 if (!(dev->flags & ATA_DFLAG_ZAC))
2297 return;
2298
2299 /*
2300 * Read Log Directory to figure out if IDENTIFY DEVICE log
2301 * is supported.
2302 */
2303 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
2304 0, ap->sector_buf, 1);
2305 if (err_mask) {
2306 ata_dev_info(dev,
2307 "failed to get Log Directory Emask 0x%x\n",
2308 err_mask);
2309 return;
2310 }
2311 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
2312 if (log_pages == 0) {
2313 ata_dev_warn(dev,
2314 "ATA Identify Device Log not supported\n");
2315 return;
2316 }
2317 /*
2318 * Read IDENTIFY DEVICE data log, page 0, to figure out
2319 * if page 9 is supported.
2320 */
2321 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0,
2322 identify_buf, 1);
2323 if (err_mask) {
2324 ata_dev_info(dev,
2325 "failed to get Device Identify Log Emask 0x%x\n",
2326 err_mask);
2327 return;
2328 }
2329 log_pages = identify_buf[8];
2330 for (i = 0; i < log_pages; i++) {
2331 if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) {
2332 found++;
2333 break;
2334 }
2335 }
2336 if (!found) {
2337 ata_dev_warn(dev,
2338 "ATA Zoned Information Log not supported\n");
2339 return;
2340 }
2341
2342 /*
2343 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2344 */
2345 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA,
2346 ATA_LOG_ZONED_INFORMATION,
2347 identify_buf, 1);
2348 if (!err_mask) {
2349 u64 zoned_cap, opt_open, opt_nonseq, max_open;
2350
2351 zoned_cap = get_unaligned_le64(&identify_buf[8]);
2352 if ((zoned_cap >> 63))
2353 dev->zac_zoned_cap = (zoned_cap & 1);
2354 opt_open = get_unaligned_le64(&identify_buf[24]);
2355 if ((opt_open >> 63))
2356 dev->zac_zones_optimal_open = (u32)opt_open;
2357 opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2358 if ((opt_nonseq >> 63))
2359 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2360 max_open = get_unaligned_le64(&identify_buf[40]);
2361 if ((max_open >> 63))
2362 dev->zac_zones_max_open = (u32)max_open;
2363 }
2364 }
2365
2366 /**
2367 * ata_dev_configure - Configure the specified ATA/ATAPI device
2368 * @dev: Target device to configure
2369 *
2370 * Configure @dev according to @dev->id. Generic and low-level
2371 * driver specific fixups are also applied.
2372 *
2373 * LOCKING:
2374 * Kernel thread context (may sleep)
2375 *
2376 * RETURNS:
2377 * 0 on success, -errno otherwise
2378 */
2379 int ata_dev_configure(struct ata_device *dev)
2380 {
2381 struct ata_port *ap = dev->link->ap;
2382 struct ata_eh_context *ehc = &dev->link->eh_context;
2383 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2384 const u16 *id = dev->id;
2385 unsigned long xfer_mask;
2386 unsigned int err_mask;
2387 char revbuf[7]; /* XYZ-99\0 */
2388 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2389 char modelbuf[ATA_ID_PROD_LEN+1];
2390 int rc;
2391
2392 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2393 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2394 return 0;
2395 }
2396
2397 if (ata_msg_probe(ap))
2398 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2399
2400 /* set horkage */
2401 dev->horkage |= ata_dev_blacklisted(dev);
2402 ata_force_horkage(dev);
2403
2404 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2405 ata_dev_info(dev, "unsupported device, disabling\n");
2406 ata_dev_disable(dev);
2407 return 0;
2408 }
2409
2410 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2411 dev->class == ATA_DEV_ATAPI) {
2412 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2413 atapi_enabled ? "not supported with this driver"
2414 : "disabled");
2415 ata_dev_disable(dev);
2416 return 0;
2417 }
2418
2419 rc = ata_do_link_spd_horkage(dev);
2420 if (rc)
2421 return rc;
2422
2423 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2424 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2425 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2426 dev->horkage |= ATA_HORKAGE_NOLPM;
2427
2428 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2429 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2430 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2431 }
2432
2433 /* let ACPI work its magic */
2434 rc = ata_acpi_on_devcfg(dev);
2435 if (rc)
2436 return rc;
2437
2438 /* massage HPA, do it early as it might change IDENTIFY data */
2439 rc = ata_hpa_resize(dev);
2440 if (rc)
2441 return rc;
2442
2443 /* print device capabilities */
2444 if (ata_msg_probe(ap))
2445 ata_dev_dbg(dev,
2446 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2447 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2448 __func__,
2449 id[49], id[82], id[83], id[84],
2450 id[85], id[86], id[87], id[88]);
2451
2452 /* initialize to-be-configured parameters */
2453 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2454 dev->max_sectors = 0;
2455 dev->cdb_len = 0;
2456 dev->n_sectors = 0;
2457 dev->cylinders = 0;
2458 dev->heads = 0;
2459 dev->sectors = 0;
2460 dev->multi_count = 0;
2461
2462 /*
2463 * common ATA, ATAPI feature tests
2464 */
2465
2466 /* find max transfer mode; for printk only */
2467 xfer_mask = ata_id_xfermask(id);
2468
2469 if (ata_msg_probe(ap))
2470 ata_dump_id(id);
2471
2472 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2473 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2474 sizeof(fwrevbuf));
2475
2476 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2477 sizeof(modelbuf));
2478
2479 /* ATA-specific feature tests */
2480 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2481 if (ata_id_is_cfa(id)) {
2482 /* CPRM may make this media unusable */
2483 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2484 ata_dev_warn(dev,
2485 "supports DRM functions and may not be fully accessible\n");
2486 snprintf(revbuf, 7, "CFA");
2487 } else {
2488 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2489 /* Warn the user if the device has TPM extensions */
2490 if (ata_id_has_tpm(id))
2491 ata_dev_warn(dev,
2492 "supports DRM functions and may not be fully accessible\n");
2493 }
2494
2495 dev->n_sectors = ata_id_n_sectors(id);
2496
2497 /* get current R/W Multiple count setting */
2498 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2499 unsigned int max = dev->id[47] & 0xff;
2500 unsigned int cnt = dev->id[59] & 0xff;
2501 /* only recognize/allow powers of two here */
2502 if (is_power_of_2(max) && is_power_of_2(cnt))
2503 if (cnt <= max)
2504 dev->multi_count = cnt;
2505 }
2506
2507 if (ata_id_has_lba(id)) {
2508 const char *lba_desc;
2509 char ncq_desc[24];
2510
2511 lba_desc = "LBA";
2512 dev->flags |= ATA_DFLAG_LBA;
2513 if (ata_id_has_lba48(id)) {
2514 dev->flags |= ATA_DFLAG_LBA48;
2515 lba_desc = "LBA48";
2516
2517 if (dev->n_sectors >= (1UL << 28) &&
2518 ata_id_has_flush_ext(id))
2519 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2520 }
2521
2522 /* config NCQ */
2523 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2524 if (rc)
2525 return rc;
2526
2527 /* print device info to dmesg */
2528 if (ata_msg_drv(ap) && print_info) {
2529 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2530 revbuf, modelbuf, fwrevbuf,
2531 ata_mode_string(xfer_mask));
2532 ata_dev_info(dev,
2533 "%llu sectors, multi %u: %s %s\n",
2534 (unsigned long long)dev->n_sectors,
2535 dev->multi_count, lba_desc, ncq_desc);
2536 }
2537 } else {
2538 /* CHS */
2539
2540 /* Default translation */
2541 dev->cylinders = id[1];
2542 dev->heads = id[3];
2543 dev->sectors = id[6];
2544
2545 if (ata_id_current_chs_valid(id)) {
2546 /* Current CHS translation is valid. */
2547 dev->cylinders = id[54];
2548 dev->heads = id[55];
2549 dev->sectors = id[56];
2550 }
2551
2552 /* print device info to dmesg */
2553 if (ata_msg_drv(ap) && print_info) {
2554 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2555 revbuf, modelbuf, fwrevbuf,
2556 ata_mode_string(xfer_mask));
2557 ata_dev_info(dev,
2558 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2559 (unsigned long long)dev->n_sectors,
2560 dev->multi_count, dev->cylinders,
2561 dev->heads, dev->sectors);
2562 }
2563 }
2564
2565 /* Check and mark DevSlp capability. Get DevSlp timing variables
2566 * from SATA Settings page of Identify Device Data Log.
2567 */
2568 if (ata_id_has_devslp(dev->id)) {
2569 u8 *sata_setting = ap->sector_buf;
2570 int i, j;
2571
2572 dev->flags |= ATA_DFLAG_DEVSLP;
2573 err_mask = ata_read_log_page(dev,
2574 ATA_LOG_SATA_ID_DEV_DATA,
2575 ATA_LOG_SATA_SETTINGS,
2576 sata_setting,
2577 1);
2578 if (err_mask)
2579 ata_dev_dbg(dev,
2580 "failed to get Identify Device Data, Emask 0x%x\n",
2581 err_mask);
2582 else
2583 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2584 j = ATA_LOG_DEVSLP_OFFSET + i;
2585 dev->devslp_timing[i] = sata_setting[j];
2586 }
2587 }
2588 ata_dev_config_sense_reporting(dev);
2589 ata_dev_config_zac(dev);
2590 dev->cdb_len = 16;
2591 }
2592
2593 /* ATAPI-specific feature tests */
2594 else if (dev->class == ATA_DEV_ATAPI) {
2595 const char *cdb_intr_string = "";
2596 const char *atapi_an_string = "";
2597 const char *dma_dir_string = "";
2598 u32 sntf;
2599
2600 rc = atapi_cdb_len(id);
2601 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2602 if (ata_msg_warn(ap))
2603 ata_dev_warn(dev, "unsupported CDB len\n");
2604 rc = -EINVAL;
2605 goto err_out_nosup;
2606 }
2607 dev->cdb_len = (unsigned int) rc;
2608
2609 /* Enable ATAPI AN if both the host and the device support
2610 * it. If PMP is attached, SNTF is required
2611 * to enable ATAPI AN to discern between PHY status
2612 * changed notifications and ATAPI ANs.
2613 */
2614 if (atapi_an &&
2615 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2616 (!sata_pmp_attached(ap) ||
2617 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2618 /* issue SET feature command to turn this on */
2619 err_mask = ata_dev_set_feature(dev,
2620 SETFEATURES_SATA_ENABLE, SATA_AN);
2621 if (err_mask)
2622 ata_dev_err(dev,
2623 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2624 err_mask);
2625 else {
2626 dev->flags |= ATA_DFLAG_AN;
2627 atapi_an_string = ", ATAPI AN";
2628 }
2629 }
2630
2631 if (ata_id_cdb_intr(dev->id)) {
2632 dev->flags |= ATA_DFLAG_CDB_INTR;
2633 cdb_intr_string = ", CDB intr";
2634 }
2635
2636 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2637 dev->flags |= ATA_DFLAG_DMADIR;
2638 dma_dir_string = ", DMADIR";
2639 }
2640
2641 if (ata_id_has_da(dev->id)) {
2642 dev->flags |= ATA_DFLAG_DA;
2643 zpodd_init(dev);
2644 }
2645
2646 /* print device info to dmesg */
2647 if (ata_msg_drv(ap) && print_info)
2648 ata_dev_info(dev,
2649 "ATAPI: %s, %s, max %s%s%s%s\n",
2650 modelbuf, fwrevbuf,
2651 ata_mode_string(xfer_mask),
2652 cdb_intr_string, atapi_an_string,
2653 dma_dir_string);
2654 }
2655
2656 /* determine max_sectors */
2657 dev->max_sectors = ATA_MAX_SECTORS;
2658 if (dev->flags & ATA_DFLAG_LBA48)
2659 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2660
2661 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2662 200 sectors */
2663 if (ata_dev_knobble(dev)) {
2664 if (ata_msg_drv(ap) && print_info)
2665 ata_dev_info(dev, "applying bridge limits\n");
2666 dev->udma_mask &= ATA_UDMA5;
2667 dev->max_sectors = ATA_MAX_SECTORS;
2668 }
2669
2670 if ((dev->class == ATA_DEV_ATAPI) &&
2671 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2672 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2673 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2674 }
2675
2676 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2677 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2678 dev->max_sectors);
2679
2680 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2681 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2682 dev->max_sectors);
2683
2684 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2685 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2686
2687 if (ap->ops->dev_config)
2688 ap->ops->dev_config(dev);
2689
2690 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2691 /* Let the user know. We don't want to disallow opens for
2692 rescue purposes, or in case the vendor is just a blithering
2693 idiot. Do this after the dev_config call as some controllers
2694 with buggy firmware may want to avoid reporting false device
2695 bugs */
2696
2697 if (print_info) {
2698 ata_dev_warn(dev,
2699 "Drive reports diagnostics failure. This may indicate a drive\n");
2700 ata_dev_warn(dev,
2701 "fault or invalid emulation. Contact drive vendor for information.\n");
2702 }
2703 }
2704
2705 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2706 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2707 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2708 }
2709
2710 return 0;
2711
2712 err_out_nosup:
2713 if (ata_msg_probe(ap))
2714 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2715 return rc;
2716 }
2717
2718 /**
2719 * ata_cable_40wire - return 40 wire cable type
2720 * @ap: port
2721 *
2722 * Helper method for drivers which want to hardwire 40 wire cable
2723 * detection.
2724 */
2725
2726 int ata_cable_40wire(struct ata_port *ap)
2727 {
2728 return ATA_CBL_PATA40;
2729 }
2730
2731 /**
2732 * ata_cable_80wire - return 80 wire cable type
2733 * @ap: port
2734 *
2735 * Helper method for drivers which want to hardwire 80 wire cable
2736 * detection.
2737 */
2738
2739 int ata_cable_80wire(struct ata_port *ap)
2740 {
2741 return ATA_CBL_PATA80;
2742 }
2743
2744 /**
2745 * ata_cable_unknown - return unknown PATA cable.
2746 * @ap: port
2747 *
2748 * Helper method for drivers which have no PATA cable detection.
2749 */
2750
2751 int ata_cable_unknown(struct ata_port *ap)
2752 {
2753 return ATA_CBL_PATA_UNK;
2754 }
2755
2756 /**
2757 * ata_cable_ignore - return ignored PATA cable.
2758 * @ap: port
2759 *
2760 * Helper method for drivers which don't use cable type to limit
2761 * transfer mode.
2762 */
2763 int ata_cable_ignore(struct ata_port *ap)
2764 {
2765 return ATA_CBL_PATA_IGN;
2766 }
2767
2768 /**
2769 * ata_cable_sata - return SATA cable type
2770 * @ap: port
2771 *
2772 * Helper method for drivers which have SATA cables
2773 */
2774
2775 int ata_cable_sata(struct ata_port *ap)
2776 {
2777 return ATA_CBL_SATA;
2778 }
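/*
 * Illustrative sketch, not used by this file: an SFF/BMDMA driver whose
 * hardware always has an 80-wire cable can plug one of the helpers
 * above straight into its cable_detect method.  The ops name below is
 * hypothetical and assumes a BMDMA-style driver.
 */
static struct ata_port_operations hypothetical_port_ops __maybe_unused = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= ata_cable_80wire,
};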
2779
2780 /**
2781 * ata_bus_probe - Reset and probe ATA bus
2782 * @ap: Bus to probe
2783 *
2784 * Master ATA bus probing function. Initiates a hardware-dependent
2785 * bus reset, then attempts to identify any devices found on
2786 * the bus.
2787 *
2788 * LOCKING:
2789 * PCI/etc. bus probe sem.
2790 *
2791 * RETURNS:
2792 * Zero on success, negative errno otherwise.
2793 */
2794
2795 int ata_bus_probe(struct ata_port *ap)
2796 {
2797 unsigned int classes[ATA_MAX_DEVICES];
2798 int tries[ATA_MAX_DEVICES];
2799 int rc;
2800 struct ata_device *dev;
2801
2802 ata_for_each_dev(dev, &ap->link, ALL)
2803 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2804
2805 retry:
2806 ata_for_each_dev(dev, &ap->link, ALL) {
2807 /* If we issue an SRST then an ATA drive (not ATAPI)
2808 * may change configuration and be in PIO0 timing. If
2809 * we do a hard reset (or are coming from power on)
2810 * this is true for ATA or ATAPI. Until we've set a
2811 * suitable controller mode we should not touch the
2812 * bus as we may be talking too fast.
2813 */
2814 dev->pio_mode = XFER_PIO_0;
2815 dev->dma_mode = 0xff;
2816
2817 /* If the controller has a pio mode setup function
2818 * then use it to set the chipset to rights. Don't
2819 * touch the DMA setup as that will be dealt with when
2820 * configuring devices.
2821 */
2822 if (ap->ops->set_piomode)
2823 ap->ops->set_piomode(ap, dev);
2824 }
2825
2826 /* reset and determine device classes */
2827 ap->ops->phy_reset(ap);
2828
2829 ata_for_each_dev(dev, &ap->link, ALL) {
2830 if (dev->class != ATA_DEV_UNKNOWN)
2831 classes[dev->devno] = dev->class;
2832 else
2833 classes[dev->devno] = ATA_DEV_NONE;
2834
2835 dev->class = ATA_DEV_UNKNOWN;
2836 }
2837
2838 /* read IDENTIFY page and configure devices. We have to do the identify
2839 specific sequence bass-ackwards so that PDIAG- is released by
2840 the slave device */
2841
2842 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2843 if (tries[dev->devno])
2844 dev->class = classes[dev->devno];
2845
2846 if (!ata_dev_enabled(dev))
2847 continue;
2848
2849 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2850 dev->id);
2851 if (rc)
2852 goto fail;
2853 }
2854
2855 /* Now ask for the cable type as PDIAG- should have been released */
2856 if (ap->ops->cable_detect)
2857 ap->cbl = ap->ops->cable_detect(ap);
2858
2859 /* We may have SATA bridge glue hiding here irrespective of
2860 * the reported cable types and sensed types. When SATA
2861 * drives indicate we have a bridge, we don't know which end
2862 * of the link the bridge is on, which is a problem.
2863 */
2864 ata_for_each_dev(dev, &ap->link, ENABLED)
2865 if (ata_id_is_sata(dev->id))
2866 ap->cbl = ATA_CBL_SATA;
2867
2868 /* After the identify sequence we can now set up the devices. We do
2869 this in the normal order so that the user doesn't get confused */
2870
2871 ata_for_each_dev(dev, &ap->link, ENABLED) {
2872 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2873 rc = ata_dev_configure(dev);
2874 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2875 if (rc)
2876 goto fail;
2877 }
2878
2879 /* configure transfer mode */
2880 rc = ata_set_mode(&ap->link, &dev);
2881 if (rc)
2882 goto fail;
2883
2884 ata_for_each_dev(dev, &ap->link, ENABLED)
2885 return 0;
2886
2887 return -ENODEV;
2888
2889 fail:
2890 tries[dev->devno]--;
2891
2892 switch (rc) {
2893 case -EINVAL:
2894 /* eeek, something went very wrong, give up */
2895 tries[dev->devno] = 0;
2896 break;
2897
2898 case -ENODEV:
2899 /* give it just one more chance */
2900 tries[dev->devno] = min(tries[dev->devno], 1);
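/* fall through */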
2901 case -EIO:
2902 if (tries[dev->devno] == 1) {
2903 /* This is the last chance, better to slow
2904 * down than lose it.
2905 */
2906 sata_down_spd_limit(&ap->link, 0);
2907 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2908 }
2909 }
2910
2911 if (!tries[dev->devno])
2912 ata_dev_disable(dev);
2913
2914 goto retry;
2915 }
2916
2917 /**
2918 * sata_print_link_status - Print SATA link status
2919 * @link: SATA link to printk link status about
2920 *
2921 * This function prints link speed and status of a SATA link.
2922 *
2923 * LOCKING:
2924 * None.
2925 */
2926 static void sata_print_link_status(struct ata_link *link)
2927 {
2928 u32 sstatus, scontrol, tmp;
2929
2930 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2931 return;
2932 sata_scr_read(link, SCR_CONTROL, &scontrol);
2933
2934 if (ata_phys_link_online(link)) {
2935 tmp = (sstatus >> 4) & 0xf;
2936 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2937 sata_spd_string(tmp), sstatus, scontrol);
2938 } else {
2939 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2940 sstatus, scontrol);
2941 }
2942 }
2943
2944 /**
2945 * ata_dev_pair - return other device on cable
2946 * @adev: device
2947 *
2948 * Obtain the other device on the same cable, or if none is
2949 * present NULL is returned
2950 */
2951
2952 struct ata_device *ata_dev_pair(struct ata_device *adev)
2953 {
2954 struct ata_link *link = adev->link;
2955 struct ata_device *pair = &link->device[1 - adev->devno];
2956 if (!ata_dev_enabled(pair))
2957 return NULL;
2958 return pair;
2959 }
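/*
 * Minimal usage sketch (hypothetical helper): the "1 - adev->devno"
 * indexing above works because a PATA cable carries exactly two
 * devices, master (devno 0) and slave (devno 1).
 */
static bool __maybe_unused hypothetical_shares_cable(struct ata_device *adev)
{
	/* true iff the other device on the same cable is enabled */
	return ata_dev_pair(adev) != NULL;
}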
2960
2961 /**
2962 * sata_down_spd_limit - adjust SATA spd limit downward
2963 * @link: Link to adjust SATA spd limit for
2964 * @spd_limit: Additional limit
2965 *
2966 * Adjust SATA spd limit of @link downward. Note that this
2967 * function only adjusts the limit. The change must be applied
2968 * using sata_set_spd().
2969 *
2970 * If @spd_limit is non-zero, the speed is limited to one equal to or
2971 * lower than @spd_limit if such a speed is supported. If
2972 * @spd_limit is slower than any supported speed, only the lowest
2973 * supported speed is allowed.
2974 *
2975 * LOCKING:
2976 * Inherited from caller.
2977 *
2978 * RETURNS:
2979 * 0 on success, negative errno on failure
2980 */
2981 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2982 {
2983 u32 sstatus, spd, mask;
2984 int rc, bit;
2985
2986 if (!sata_scr_valid(link))
2987 return -EOPNOTSUPP;
2988
2989 /* If SCR can be read, use it to determine the current SPD.
2990 * If not, use cached value in link->sata_spd.
2991 */
2992 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2993 if (rc == 0 && ata_sstatus_online(sstatus))
2994 spd = (sstatus >> 4) & 0xf;
2995 else
2996 spd = link->sata_spd;
2997
2998 mask = link->sata_spd_limit;
2999 if (mask <= 1)
3000 return -EINVAL;
3001
3002 /* unconditionally mask off the highest bit */
3003 bit = fls(mask) - 1;
3004 mask &= ~(1 << bit);
3005
3006 /* Mask off all speeds higher than or equal to the current
3007 * one. Force 1.5Gbps if current SPD is not available.
3008 */
3009 if (spd > 1)
3010 mask &= (1 << (spd - 1)) - 1;
3011 else
3012 mask &= 1;
3013
3014 /* were we already at the bottom? */
3015 if (!mask)
3016 return -EINVAL;
3017
3018 if (spd_limit) {
3019 if (mask & ((1 << spd_limit) - 1))
3020 mask &= (1 << spd_limit) - 1;
3021 else {
3022 bit = ffs(mask) - 1;
3023 mask = 1 << bit;
3024 }
3025 }
3026
3027 link->sata_spd_limit = mask;
3028
3029 ata_link_warn(link, "limiting SATA link speed to %s\n",
3030 sata_spd_string(fls(mask)));
3031
3032 return 0;
3033 }
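/*
 * Worked example of the mask arithmetic above (hypothetical state):
 * with link->sata_spd_limit == 0x7 (1.5/3.0/6.0 Gbps all allowed) and
 * the link running at 3.0 Gbps (spd == 2), fls(0x7) - 1 == 2 clears
 * bit 2 leaving 0x3, and "mask &= (1 << (spd - 1)) - 1" leaves 0x1,
 * so the link is limited to 1.5 Gbps at the next reset.
 */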
3034
3035 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3036 {
3037 struct ata_link *host_link = &link->ap->link;
3038 u32 limit, target, spd;
3039
3040 limit = link->sata_spd_limit;
3041
3042 /* Don't configure downstream link faster than upstream link.
3043 * It doesn't speed up anything and some PMPs choke on such a
3044 * configuration.
3045 */
3046 if (!ata_is_host_link(link) && host_link->sata_spd)
3047 limit &= (1 << host_link->sata_spd) - 1;
3048
3049 if (limit == UINT_MAX)
3050 target = 0;
3051 else
3052 target = fls(limit);
3053
3054 spd = (*scontrol >> 4) & 0xf;
3055 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3056
3057 return spd != target;
3058 }
3059
3060 /**
3061 * sata_set_spd_needed - is SATA spd configuration needed
3062 * @link: Link in question
3063 *
3064 * Test whether the spd limit in SControl matches
3065 * @link->sata_spd_limit. This function is used to determine
3066 * whether hardreset is necessary to apply SATA spd
3067 * configuration.
3068 *
3069 * LOCKING:
3070 * Inherited from caller.
3071 *
3072 * RETURNS:
3073 * 1 if SATA spd configuration is needed, 0 otherwise.
3074 */
3075 static int sata_set_spd_needed(struct ata_link *link)
3076 {
3077 u32 scontrol;
3078
3079 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3080 return 1;
3081
3082 return __sata_set_spd_needed(link, &scontrol);
3083 }
3084
3085 /**
3086 * sata_set_spd - set SATA spd according to spd limit
3087 * @link: Link to set SATA spd for
3088 *
3089 * Set SATA spd of @link according to sata_spd_limit.
3090 *
3091 * LOCKING:
3092 * Inherited from caller.
3093 *
3094 * RETURNS:
3095 * 0 if spd doesn't need to be changed, 1 if spd has been
3096 * changed. Negative errno if SCR registers are inaccessible.
3097 */
3098 int sata_set_spd(struct ata_link *link)
3099 {
3100 u32 scontrol;
3101 int rc;
3102
3103 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3104 return rc;
3105
3106 if (!__sata_set_spd_needed(link, &scontrol))
3107 return 0;
3108
3109 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3110 return rc;
3111
3112 return 1;
3113 }
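/*
 * Note for callers (illustrative): writing SControl only latches the
 * new limit, so a return of 1 means a hardreset must follow for the
 * PHY to renegotiate at the new speed -- see sata_link_hardreset(),
 * which calls sata_set_spd() and then issues the reset.
 */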
3114
3115 /*
3116 * This mode timing computation functionality is ported over from
3117 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3118 */
3119 /*
3120 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3121 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3122 * for UDMA6, which is currently supported only by Maxtor drives.
3123 *
3124 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3125 */
3126
3127 static const struct ata_timing ata_timing[] = {
3128 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
3129 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
3130 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
3131 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
3132 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
3133 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
3134 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
3135 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
3136
3137 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
3138 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
3139 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
3140
3141 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
3142 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
3143 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
3144 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
3145 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
3146
3147 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
3148 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
3149 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
3150 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
3151 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
3152 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
3153 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
3154 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
3155
3156 { 0xFF }
3157 };
3158
3159 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
3160 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
3161
3162 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3163 {
3164 q->setup = EZ(t->setup * 1000, T);
3165 q->act8b = EZ(t->act8b * 1000, T);
3166 q->rec8b = EZ(t->rec8b * 1000, T);
3167 q->cyc8b = EZ(t->cyc8b * 1000, T);
3168 q->active = EZ(t->active * 1000, T);
3169 q->recover = EZ(t->recover * 1000, T);
3170 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
3171 q->cycle = EZ(t->cycle * 1000, T);
3172 q->udma = EZ(t->udma * 1000, UT);
3173 }
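/*
 * Worked example, assuming the usual caller convention: the table above
 * stores times in nanoseconds and T/UT are clock periods scaled by the
 * same factor of 1000 used here (e.g. T = 1000000000 / 33333 == 30000
 * for a 33 MHz bus).  A 120 ns cycle then quantizes to
 * ENOUGH(120 * 1000, 30000) == 4 clock periods; rounding up guarantees
 * the programmed timing is never faster than the drive requires.
 */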
3174
3175 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3176 struct ata_timing *m, unsigned int what)
3177 {
3178 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3179 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3180 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3181 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3182 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3183 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3184 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3185 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3186 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3187 }
3188
3189 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3190 {
3191 const struct ata_timing *t = ata_timing;
3192
3193 while (xfer_mode > t->mode)
3194 t++;
3195
3196 if (xfer_mode == t->mode)
3197 return t;
3198
3199 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3200 __func__, xfer_mode);
3201
3202 return NULL;
3203 }
3204
3205 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3206 struct ata_timing *t, int T, int UT)
3207 {
3208 const u16 *id = adev->id;
3209 const struct ata_timing *s;
3210 struct ata_timing p;
3211
3212 /*
3213 * Find the mode.
3214 */
3215
3216 if (!(s = ata_timing_find_mode(speed)))
3217 return -EINVAL;
3218
3219 memcpy(t, s, sizeof(*s));
3220
3221 /*
3222 * If the drive is an EIDE drive, it can tell us it needs extended
3223 * PIO/MW_DMA cycle timing.
3224 */
3225
3226 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3227 memset(&p, 0, sizeof(p));
3228
3229 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3230 if (speed <= XFER_PIO_2)
3231 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3232 else if ((speed <= XFER_PIO_4) ||
3233 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3234 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3235 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3236 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3237
3238 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3239 }
3240
3241 /*
3242 * Convert the timing to bus clock counts.
3243 */
3244
3245 ata_timing_quantize(t, t, T, UT);
3246
3247 /*
3248 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3249 * S.M.A.R.T. and some other commands. We have to ensure that the
3250 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3251 */
3252
3253 if (speed > XFER_PIO_6) {
3254 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3255 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3256 }
3257
3258 /*
3259 * Lengthen active & recovery time so that cycle time is correct.
3260 */
3261
3262 if (t->act8b + t->rec8b < t->cyc8b) {
3263 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3264 t->rec8b = t->cyc8b - t->act8b;
3265 }
3266
3267 if (t->active + t->recover < t->cycle) {
3268 t->active += (t->cycle - (t->active + t->recover)) / 2;
3269 t->recover = t->cycle - t->active;
3270 }
3271
3272 /* In a few cases quantisation may produce enough errors to
3273 leave t->cycle too low for the sum of active and recovery;
3274 if so, we must correct this */
3275 if (t->active + t->recover > t->cycle)
3276 t->cycle = t->active + t->recover;
3277
3278 return 0;
3279 }
3280
3281 /**
3282 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3283 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3284 * @cycle: cycle duration in ns
3285 *
3286 * Return matching xfer mode for @cycle. The returned mode is of
3287 * the transfer type specified by @xfer_shift. If @cycle is too
3288 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3289 * than the fastest known mode, the fasted mode is returned.
3290 *
3291 * LOCKING:
3292 * None.
3293 *
3294 * RETURNS:
3295 * Matching xfer_mode, 0xff if no match found.
3296 */
3297 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3298 {
3299 u8 base_mode = 0xff, last_mode = 0xff;
3300 const struct ata_xfer_ent *ent;
3301 const struct ata_timing *t;
3302
3303 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3304 if (ent->shift == xfer_shift)
3305 base_mode = ent->base;
3306
3307 for (t = ata_timing_find_mode(base_mode);
3308 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3309 unsigned short this_cycle;
3310
3311 switch (xfer_shift) {
3312 case ATA_SHIFT_PIO:
3313 case ATA_SHIFT_MWDMA:
3314 this_cycle = t->cycle;
3315 break;
3316 case ATA_SHIFT_UDMA:
3317 this_cycle = t->udma;
3318 break;
3319 default:
3320 return 0xff;
3321 }
3322
3323 if (cycle > this_cycle)
3324 break;
3325
3326 last_mode = t->mode;
3327 }
3328
3329 return last_mode;
3330 }
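/*
 * Worked example: ata_timing_cycle2mode(ATA_SHIFT_UDMA, 30) scans the
 * UDMA entries of the timing table until the requested 30 ns cycle
 * exceeds an entry's udma cycle; XFER_UDMA_4 (udma == 30) still
 * qualifies while XFER_UDMA_5 (udma == 20) does not, so XFER_UDMA_4 is
 * returned.
 */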
3331
3332 /**
3333 * ata_down_xfermask_limit - adjust dev xfer masks downward
3334 * @dev: Device to adjust xfer masks
3335 * @sel: ATA_DNXFER_* selector
3336 *
3337 * Adjust xfer masks of @dev downward. Note that this function
3338 * does not apply the change. Invoking ata_set_mode() afterwards
3339 * will apply the limit.
3340 *
3341 * LOCKING:
3342 * Inherited from caller.
3343 *
3344 * RETURNS:
3345 * 0 on success, negative errno on failure
3346 */
3347 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3348 {
3349 char buf[32];
3350 unsigned long orig_mask, xfer_mask;
3351 unsigned long pio_mask, mwdma_mask, udma_mask;
3352 int quiet, highbit;
3353
3354 quiet = !!(sel & ATA_DNXFER_QUIET);
3355 sel &= ~ATA_DNXFER_QUIET;
3356
3357 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3358 dev->mwdma_mask,
3359 dev->udma_mask);
3360 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3361
3362 switch (sel) {
3363 case ATA_DNXFER_PIO:
3364 highbit = fls(pio_mask) - 1;
3365 pio_mask &= ~(1 << highbit);
3366 break;
3367
3368 case ATA_DNXFER_DMA:
3369 if (udma_mask) {
3370 highbit = fls(udma_mask) - 1;
3371 udma_mask &= ~(1 << highbit);
3372 if (!udma_mask)
3373 return -ENOENT;
3374 } else if (mwdma_mask) {
3375 highbit = fls(mwdma_mask) - 1;
3376 mwdma_mask &= ~(1 << highbit);
3377 if (!mwdma_mask)
3378 return -ENOENT;
3379 }
3380 break;
3381
3382 case ATA_DNXFER_40C:
3383 udma_mask &= ATA_UDMA_MASK_40C;
3384 break;
3385
3386 case ATA_DNXFER_FORCE_PIO0:
3387 pio_mask &= 1;
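/* fall through */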
3388 case ATA_DNXFER_FORCE_PIO:
3389 mwdma_mask = 0;
3390 udma_mask = 0;
3391 break;
3392
3393 default:
3394 BUG();
3395 }
3396
3397 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3398
3399 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3400 return -ENOENT;
3401
3402 if (!quiet) {
3403 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3404 snprintf(buf, sizeof(buf), "%s:%s",
3405 ata_mode_string(xfer_mask),
3406 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3407 else
3408 snprintf(buf, sizeof(buf), "%s",
3409 ata_mode_string(xfer_mask));
3410
3411 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3412 }
3413
3414 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3415 &dev->udma_mask);
3416
3417 return 0;
3418 }
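/*
 * Worked example (hypothetical masks): with pio_mask == 0x1f (PIO0-4),
 * ATA_DNXFER_PIO clears the highest bit, leaving 0x0f (PIO0-3); each EH
 * retry steps the device down one more mode.  Once only PIO0 remains, a
 * further call would empty the PIO mask, so -ENOENT is returned instead.
 */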
3419
3420 static int ata_dev_set_mode(struct ata_device *dev)
3421 {
3422 struct ata_port *ap = dev->link->ap;
3423 struct ata_eh_context *ehc = &dev->link->eh_context;
3424 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3425 const char *dev_err_whine = "";
3426 int ign_dev_err = 0;
3427 unsigned int err_mask = 0;
3428 int rc;
3429
3430 dev->flags &= ~ATA_DFLAG_PIO;
3431 if (dev->xfer_shift == ATA_SHIFT_PIO)
3432 dev->flags |= ATA_DFLAG_PIO;
3433
3434 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3435 dev_err_whine = " (SET_XFERMODE skipped)";
3436 else {
3437 if (nosetxfer)
3438 ata_dev_warn(dev,
3439 "NOSETXFER but PATA detected - can't "
3440 "skip SETXFER, might malfunction\n");
3441 err_mask = ata_dev_set_xfermode(dev);
3442 }
3443
3444 if (err_mask & ~AC_ERR_DEV)
3445 goto fail;
3446
3447 /* revalidate */
3448 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3449 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3450 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3451 if (rc)
3452 return rc;
3453
3454 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3455 /* Old CFA may refuse this command, which is just fine */
3456 if (ata_id_is_cfa(dev->id))
3457 ign_dev_err = 1;
3458 /* Catch several broken garbage emulations plus some pre
3459 ATA devices */
3460 if (ata_id_major_version(dev->id) == 0 &&
3461 dev->pio_mode <= XFER_PIO_2)
3462 ign_dev_err = 1;
3463 /* Some very old devices and some bad newer ones fail
3464 any kind of SET_XFERMODE request but support PIO0-2
3465 timings and no IORDY */
3466 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3467 ign_dev_err = 1;
3468 }
3469 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3470 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3471 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3472 dev->dma_mode == XFER_MW_DMA_0 &&
3473 (dev->id[63] >> 8) & 1)
3474 ign_dev_err = 1;
3475
3476 /* if the device is actually configured correctly, ignore dev err */
3477 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3478 ign_dev_err = 1;
3479
3480 if (err_mask & AC_ERR_DEV) {
3481 if (!ign_dev_err)
3482 goto fail;
3483 else
3484 dev_err_whine = " (device error ignored)";
3485 }
3486
3487 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3488 dev->xfer_shift, (int)dev->xfer_mode);
3489
3490 ata_dev_info(dev, "configured for %s%s\n",
3491 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3492 dev_err_whine);
3493
3494 return 0;
3495
3496 fail:
3497 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3498 return -EIO;
3499 }
3500
3501 /**
3502 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3503 * @link: link on which timings will be programmed
3504 * @r_failed_dev: out parameter for failed device
3505 *
3506 * Standard implementation of the function used to tune and set
3507 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3508 * ata_dev_set_mode() fails, pointer to the failing device is
3509 * returned in @r_failed_dev.
3510 *
3511 * LOCKING:
3512 * PCI/etc. bus probe sem.
3513 *
3514 * RETURNS:
3515 * 0 on success, negative errno otherwise
3516 */
3517
3518 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3519 {
3520 struct ata_port *ap = link->ap;
3521 struct ata_device *dev;
3522 int rc = 0, used_dma = 0, found = 0;
3523
3524 /* step 1: calculate xfer_mask */
3525 ata_for_each_dev(dev, link, ENABLED) {
3526 unsigned long pio_mask, dma_mask;
3527 unsigned int mode_mask;
3528
3529 mode_mask = ATA_DMA_MASK_ATA;
3530 if (dev->class == ATA_DEV_ATAPI)
3531 mode_mask = ATA_DMA_MASK_ATAPI;
3532 else if (ata_id_is_cfa(dev->id))
3533 mode_mask = ATA_DMA_MASK_CFA;
3534
3535 ata_dev_xfermask(dev);
3536 ata_force_xfermask(dev);
3537
3538 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3539
3540 if (libata_dma_mask & mode_mask)
3541 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3542 dev->udma_mask);
3543 else
3544 dma_mask = 0;
3545
3546 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3547 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3548
3549 found = 1;
3550 if (ata_dma_enabled(dev))
3551 used_dma = 1;
3552 }
3553 if (!found)
3554 goto out;
3555
3556 /* step 2: always set host PIO timings */
3557 ata_for_each_dev(dev, link, ENABLED) {
3558 if (dev->pio_mode == 0xff) {
3559 ata_dev_warn(dev, "no PIO support\n");
3560 rc = -EINVAL;
3561 goto out;
3562 }
3563
3564 dev->xfer_mode = dev->pio_mode;
3565 dev->xfer_shift = ATA_SHIFT_PIO;
3566 if (ap->ops->set_piomode)
3567 ap->ops->set_piomode(ap, dev);
3568 }
3569
3570 /* step 3: set host DMA timings */
3571 ata_for_each_dev(dev, link, ENABLED) {
3572 if (!ata_dma_enabled(dev))
3573 continue;
3574
3575 dev->xfer_mode = dev->dma_mode;
3576 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3577 if (ap->ops->set_dmamode)
3578 ap->ops->set_dmamode(ap, dev);
3579 }
3580
3581 /* step 4: update devices' xfer mode */
3582 ata_for_each_dev(dev, link, ENABLED) {
3583 rc = ata_dev_set_mode(dev);
3584 if (rc)
3585 goto out;
3586 }
3587
3588 /* Record simplex status. If we selected DMA then the other
3589 * host channels are not permitted to do so.
3590 */
3591 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3592 ap->host->simplex_claimed = ap;
3593
3594 out:
3595 if (rc)
3596 *r_failed_dev = dev;
3597 return rc;
3598 }
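/*
 * Illustrative sketch (hypothetical driver code): an LLD that needs
 * board-specific work around mode programming typically wraps this
 * helper from its own ->set_mode hook instead of reimplementing the
 * four-step sequence above.
 */
static int __maybe_unused hypothetical_set_mode(struct ata_link *link,
						struct ata_device **r_failed)
{
	/* apply controller-specific quirks here, then run the standard path */
	return ata_do_set_mode(link, r_failed);
}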
3599
3600 /**
3601 * ata_wait_ready - wait for link to become ready
3602 * @link: link to be waited on
3603 * @deadline: deadline jiffies for the operation
3604 * @check_ready: callback to check link readiness
3605 *
3606 * Wait for @link to become ready. @check_ready should return
3607 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3608 * link doesn't seem to be occupied, other errno for other error
3609 * conditions.
3610 *
3611 * Transient -ENODEV conditions are allowed for
3612 * ATA_TMOUT_FF_WAIT.
3613 *
3614 * LOCKING:
3615 * EH context.
3616 *
3617 * RETURNS:
3618 * 0 if @link is ready before @deadline; otherwise, -errno.
3619 */
3620 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3621 int (*check_ready)(struct ata_link *link))
3622 {
3623 unsigned long start = jiffies;
3624 unsigned long nodev_deadline;
3625 int warned = 0;
3626
3627 /* choose which 0xff timeout to use, read comment in libata.h */
3628 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3629 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3630 else
3631 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3632
3633 /* Slave readiness can't be tested separately from master. On
3634 * M/S emulation configuration, this function should be called
3635 * only on the master and it will handle both master and slave.
3636 */
3637 WARN_ON(link == link->ap->slave_link);
3638
3639 if (time_after(nodev_deadline, deadline))
3640 nodev_deadline = deadline;
3641
3642 while (1) {
3643 unsigned long now = jiffies;
3644 int ready, tmp;
3645
3646 ready = tmp = check_ready(link);
3647 if (ready > 0)
3648 return 0;
3649
3650 /*
3651 * -ENODEV could be transient. Ignore -ENODEV if link
3652 * is online. Also, some SATA devices take a long
3653 * time to clear 0xff after reset. Wait for
3654 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3655 * offline.
3656 *
3657 * Note that some PATA controllers (pata_ali) explode
3658 * if status register is read more than once when
3659 * there's no device attached.
3660 */
3661 if (ready == -ENODEV) {
3662 if (ata_link_online(link))
3663 ready = 0;
3664 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3665 !ata_link_offline(link) &&
3666 time_before(now, nodev_deadline))
3667 ready = 0;
3668 }
3669
3670 if (ready)
3671 return ready;
3672 if (time_after(now, deadline))
3673 return -EBUSY;
3674
3675 if (!warned && time_after(now, start + 5 * HZ) &&
3676 (deadline - now > 3 * HZ)) {
3677 ata_link_warn(link,
3678 "link is slow to respond, please be patient "
3679 "(ready=%d)\n", tmp);
3680 warned = 1;
3681 }
3682
3683 ata_msleep(link->ap, 50);
3684 }
3685 }
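/*
 * Illustrative sketch of a @check_ready callback; my_read_status() is a
 * hypothetical register read (SFF drivers really use
 * ata_sff_check_ready()).  The callback must return > 0 when ready,
 * 0 when busy and -ENODEV when the link looks unoccupied, as described
 * above.
 */
static int __maybe_unused hypothetical_check_ready(struct ata_link *link)
{
	u8 status = my_read_status(link->ap);	/* hypothetical helper */

	return ata_check_ready(status);
}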
3686
3687 /**
3688 * ata_wait_after_reset - wait for link to become ready after reset
3689 * @link: link to be waited on
3690 * @deadline: deadline jiffies for the operation
3691 * @check_ready: callback to check link readiness
3692 *
3693 * Wait for @link to become ready after reset.
3694 *
3695 * LOCKING:
3696 * EH context.
3697 *
3698 * RETURNS:
3699 * 0 if @link is ready before @deadline; otherwise, -errno.
3700 */
3701 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3702 int (*check_ready)(struct ata_link *link))
3703 {
3704 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3705
3706 return ata_wait_ready(link, deadline, check_ready);
3707 }
3708
3709 /**
3710 * sata_link_debounce - debounce SATA phy status
3711 * @link: ATA link to debounce SATA phy status for
3712 * @params: timing parameters { interval, duration, timeout } in msec
3713 * @deadline: deadline jiffies for the operation
3714 *
3715 * Make sure SStatus of @link reaches stable state, determined by
3716 * holding the same value where DET is not 1 for @duration polled
3717 * every @interval, before @timeout. @timeout constrains the
3718 * beginning of the stable state. Because DET gets stuck at 1 on
3719 * some controllers after hot unplugging, this function waits
3720 * until timeout then returns 0 if DET is stable at 1.
3721 *
3722 * @timeout is further limited by @deadline. The sooner of the
3723 * two is used.
3724 *
3725 * LOCKING:
3726 * Kernel thread context (may sleep)
3727 *
3728 * RETURNS:
3729 * 0 on success, -errno on failure.
3730 */
3731 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3732 unsigned long deadline)
3733 {
3734 unsigned long interval = params[0];
3735 unsigned long duration = params[1];
3736 unsigned long last_jiffies, t;
3737 u32 last, cur;
3738 int rc;
3739
3740 t = ata_deadline(jiffies, params[2]);
3741 if (time_before(t, deadline))
3742 deadline = t;
3743
3744 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3745 return rc;
3746 cur &= 0xf;
3747
3748 last = cur;
3749 last_jiffies = jiffies;
3750
3751 while (1) {
3752 ata_msleep(link->ap, interval);
3753 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3754 return rc;
3755 cur &= 0xf;
3756
3757 /* DET stable? */
3758 if (cur == last) {
3759 if (cur == 1 && time_before(jiffies, deadline))
3760 continue;
3761 if (time_after(jiffies,
3762 ata_deadline(last_jiffies, duration)))
3763 return 0;
3764 continue;
3765 }
3766
3767 /* unstable, start over */
3768 last = cur;
3769 last_jiffies = jiffies;
3770
3771 /* Check deadline. If debouncing failed, return
3772 * -EPIPE to tell upper layer to lower link speed.
3773 */
3774 if (time_after(jiffies, deadline))
3775 return -EPIPE;
3776 }
3777 }
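/*
 * Example with the hotplug parameter set { 25, 500, 2000 }: SStatus is
 * sampled every 25 ms, DET must hold the same non-1 value for 500 ms to
 * be considered stable, and the whole exercise is bounded by 2000 ms
 * (or @deadline, whichever comes first).
 */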
3778
3779 /**
3780 * sata_link_resume - resume SATA link
3781 * @link: ATA link to resume SATA
3782 * @params: timing parameters { interval, duration, timeout } in msec
3783 * @deadline: deadline jiffies for the operation
3784 *
3785 * Resume SATA phy @link and debounce it.
3786 *
3787 * LOCKING:
3788 * Kernel thread context (may sleep)
3789 *
3790 * RETURNS:
3791 * 0 on success, -errno on failure.
3792 */
3793 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3794 unsigned long deadline)
3795 {
3796 int tries = ATA_LINK_RESUME_TRIES;
3797 u32 scontrol, serror;
3798 int rc;
3799
3800 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3801 return rc;
3802
3803 /*
3804 * Writes to SControl sometimes get ignored under certain
3805 * controllers (ata_piix SIDPR). Make sure DET actually is
3806 * cleared.
3807 */
3808 do {
3809 scontrol = (scontrol & 0x0f0) | 0x300;
3810 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3811 return rc;
3812 /*
3813 * Some PHYs react badly if SStatus is pounded
3814 * immediately after resuming. Delay 200ms before
3815 * debouncing.
3816 */
3817 if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
3818 ata_msleep(link->ap, 200);
3819
3820 /* is SControl restored correctly? */
3821 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3822 return rc;
3823 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3824
3825 if ((scontrol & 0xf0f) != 0x300) {
3826 ata_link_warn(link, "failed to resume link (SControl %X)\n",
3827 scontrol);
3828 return 0;
3829 }
3830
3831 if (tries < ATA_LINK_RESUME_TRIES)
3832 ata_link_warn(link, "link resume succeeded after %d retries\n",
3833 ATA_LINK_RESUME_TRIES - tries);
3834
3835 if ((rc = sata_link_debounce(link, params, deadline)))
3836 return rc;
3837
3838 /* clear SError, some PHYs require this even for SRST to work */
3839 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3840 rc = sata_scr_write(link, SCR_ERROR, serror);
3841
3842 return rc != -EINVAL ? rc : 0;
3843 }
3844
3845 /**
3846 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3847 * @link: ATA link to manipulate SControl for
3848 * @policy: LPM policy to configure
3849 * @spm_wakeup: initiate LPM transition to active state
3850 *
3851 * Manipulate the IPM field of the SControl register of @link
3852 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3853 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3854 * the link. This function also clears PHYRDY_CHG before
3855 * returning.
3856 *
3857 * LOCKING:
3858 * EH context.
3859 *
3860 * RETURNS:
3861 * 0 on success, -errno otherwise.
3862 */
3863 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3864 bool spm_wakeup)
3865 {
3866 struct ata_eh_context *ehc = &link->eh_context;
3867 bool woken_up = false;
3868 u32 scontrol;
3869 int rc;
3870
3871 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3872 if (rc)
3873 return rc;
3874
3875 switch (policy) {
3876 case ATA_LPM_MAX_POWER:
3877 /* disable all LPM transitions */
3878 scontrol |= (0x7 << 8);
3879 /* initiate transition to active state */
3880 if (spm_wakeup) {
3881 scontrol |= (0x4 << 12);
3882 woken_up = true;
3883 }
3884 break;
3885 case ATA_LPM_MED_POWER:
3886 /* allow LPM to PARTIAL */
3887 scontrol &= ~(0x1 << 8);
3888 scontrol |= (0x6 << 8);
3889 break;
3890 case ATA_LPM_MIN_POWER:
3891 if (ata_link_nr_enabled(link) > 0)
3892 /* no restrictions on LPM transitions */
3893 scontrol &= ~(0x7 << 8);
3894 else {
3895 /* empty port, power off */
3896 scontrol &= ~0xf;
3897 scontrol |= (0x1 << 2);
3898 }
3899 break;
3900 default:
3901 WARN_ON(1);
3902 }
3903
3904 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3905 if (rc)
3906 return rc;
3907
3908 /* give the link time to transit out of LPM state */
3909 if (woken_up)
3910 msleep(10);
3911
3912 /* clear PHYRDY_CHG from SError */
3913 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3914 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3915 }
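/*
 * For reference: the SControl IPM field written above (bits 8-11) is a
 * "transitions disabled" mask -- bit 8 blocks PARTIAL, bit 9 blocks
 * SLUMBER, bit 10 blocks DevSleep.  That is why MED_POWER clears bit 8
 * but sets 0x6 << 8, while MAX_POWER disables everything with 0x7 << 8.
 */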
3916
3917 /**
3918 * ata_std_prereset - prepare for reset
3919 * @link: ATA link to be reset
3920 * @deadline: deadline jiffies for the operation
3921 *
3922 * @link is about to be reset. Initialize it. Failure from
3923 * prereset makes libata abort whole reset sequence and give up
3924 * that port, so prereset should be best-effort. It does its
3925 * best to prepare for reset sequence but if things go wrong, it
3926 * should just whine, not fail.
3927 *
3928 * LOCKING:
3929 * Kernel thread context (may sleep)
3930 *
3931 * RETURNS:
3932 * 0 on success, -errno otherwise.
3933 */
3934 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3935 {
3936 struct ata_port *ap = link->ap;
3937 struct ata_eh_context *ehc = &link->eh_context;
3938 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3939 int rc;
3940
3941 /* if we're about to do hardreset, nothing more to do */
3942 if (ehc->i.action & ATA_EH_HARDRESET)
3943 return 0;
3944
3945 /* if SATA, resume link */
3946 if (ap->flags & ATA_FLAG_SATA) {
3947 rc = sata_link_resume(link, timing, deadline);
3948 /* whine about phy resume failure but proceed */
3949 if (rc && rc != -EOPNOTSUPP)
3950 ata_link_warn(link,
3951 "failed to resume link for reset (errno=%d)\n",
3952 rc);
3953 }
3954
3955 /* no point in trying softreset on offline link */
3956 if (ata_phys_link_offline(link))
3957 ehc->i.action &= ~ATA_EH_SOFTRESET;
3958
3959 return 0;
3960 }
3961
3962 /**
3963 * sata_link_hardreset - reset link via SATA phy reset
3964 * @link: link to reset
3965 * @timing: timing parameters { interval, duration, timeout } in msec
3966 * @deadline: deadline jiffies for the operation
3967 * @online: optional out parameter indicating link onlineness
3968 * @check_ready: optional callback to check link readiness
3969 *
3970 * SATA phy-reset @link using DET bits of SControl register.
3971 * After hardreset, link readiness is waited upon using
3972 * ata_wait_ready() if @check_ready is specified. LLDs are
3973 * allowed to not specify @check_ready and to wait on their own
3974 * after this function returns. Device classification is the LLD's
3975 * responsibility.
3976 *
3977 * *@online is set to %true iff reset succeeded and @link is online
3978 * after reset.
3979 *
3980 * LOCKING:
3981 * Kernel thread context (may sleep)
3982 *
3983 * RETURNS:
3984 * 0 on success, -errno otherwise.
3985 */
3986 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3987 unsigned long deadline,
3988 bool *online, int (*check_ready)(struct ata_link *))
3989 {
3990 u32 scontrol;
3991 int rc;
3992
3993 DPRINTK("ENTER\n");
3994
3995 if (online)
3996 *online = false;
3997
3998 if (sata_set_spd_needed(link)) {
3999 /* SATA spec says nothing about how to reconfigure
4000 * spd. To be on the safe side, turn off phy during
4001 * reconfiguration. This works for at least ICH7 AHCI
4002 * and Sil3124.
4003 */
4004 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4005 goto out;
4006
4007 scontrol = (scontrol & 0x0f0) | 0x304;
4008
4009 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
4010 goto out;
4011
4012 sata_set_spd(link);
4013 }
4014
4015 /* issue phy wake/reset */
4016 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4017 goto out;
4018
4019 scontrol = (scontrol & 0x0f0) | 0x301;
4020
4021 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
4022 goto out;
4023
4024 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
4025 * 10.4.2 says at least 1 ms.
4026 */
4027 ata_msleep(link->ap, 1);
4028
4029 /* bring link back */
4030 rc = sata_link_resume(link, timing, deadline);
4031 if (rc)
4032 goto out;
4033 /* if link is offline nothing more to do */
4034 if (ata_phys_link_offline(link))
4035 goto out;
4036
4037 /* Link is online. From this point, -ENODEV too is an error. */
4038 if (online)
4039 *online = true;
4040
4041 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
4042 /* If PMP is supported, we have to do follow-up SRST.
4043 * Some PMPs don't send D2H Reg FIS after hardreset if
4044 * the first port is empty. Wait only for
4045 * ATA_TMOUT_PMP_SRST_WAIT.
4046 */
4047 if (check_ready) {
4048 unsigned long pmp_deadline;
4049
4050 pmp_deadline = ata_deadline(jiffies,
4051 ATA_TMOUT_PMP_SRST_WAIT);
4052 if (time_after(pmp_deadline, deadline))
4053 pmp_deadline = deadline;
4054 ata_wait_ready(link, pmp_deadline, check_ready);
4055 }
4056 rc = -EAGAIN;
4057 goto out;
4058 }
4059
4060 rc = 0;
4061 if (check_ready)
4062 rc = ata_wait_ready(link, deadline, check_ready);
4063 out:
4064 if (rc && rc != -EAGAIN) {
4065 /* online is set iff link is online && reset succeeded */
4066 if (online)
4067 *online = false;
4068 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
4069 }
4070 DPRINTK("EXIT, rc=%d\n", rc);
4071 return rc;
4072 }
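
/*
 * A minimal sketch of how an LLD might wire this up (hypothetical
 * driver code shown for illustration only; my_read_port_status() is an
 * assumed driver-specific helper returning the taskfile status byte):
 *
 *	static int my_check_ready(struct ata_link *link)
 *	{
 *		u8 status = my_read_port_status(link->ap);
 *
 *		return ata_check_ready(status);
 *	}
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing =
 *			sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *
 *		return sata_link_hardreset(link, timing, deadline, &online,
 *					   my_check_ready);
 *	}
 */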
4073
4074 /**
4075 * sata_std_hardreset - COMRESET w/o waiting or classification
4076 * @link: link to reset
4077 * @class: resulting class of attached device
4078 * @deadline: deadline jiffies for the operation
4079 *
4080 * Standard SATA COMRESET w/o waiting or classification.
4081 *
4082 * LOCKING:
4083 * Kernel thread context (may sleep)
4084 *
4085 * RETURNS:
4086 * 0 if link offline, -EAGAIN if link online, -errno on errors.
4087 */
4088 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4089 unsigned long deadline)
4090 {
4091 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4092 bool online;
4093 int rc;
4094
4095 /* do hardreset */
4096 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4097 return online ? -EAGAIN : rc;
4098 }
4099
4100 /**
4101 * ata_std_postreset - standard postreset callback
4102 * @link: the target ata_link
4103 * @classes: classes of attached devices
4104 *
4105 * This function is invoked after a successful reset. Note that
4106 * the device might have been reset more than once using
4107 * different reset methods before postreset is invoked.
4108 *
4109 * LOCKING:
4110 * Kernel thread context (may sleep)
4111 */
4112 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4113 {
4114 u32 serror;
4115
4116 DPRINTK("ENTER\n");
4117
4118 /* reset complete, clear SError */
4119 if (!sata_scr_read(link, SCR_ERROR, &serror))
4120 sata_scr_write(link, SCR_ERROR, serror);
4121
4122 /* print link status */
4123 sata_print_link_status(link);
4124
4125 DPRINTK("EXIT\n");
4126 }
4127
4128 /**
4129 * ata_dev_same_device - Determine whether new ID matches configured device
4130 * @dev: device to compare against
4131 * @new_class: class of the new device
4132 * @new_id: IDENTIFY page of the new device
4133 *
4134 * Compare @new_class and @new_id against @dev and determine
4135 * whether @dev is the device indicated by @new_class and
4136 * @new_id.
4137 *
4138 * LOCKING:
4139 * None.
4140 *
4141 * RETURNS:
4142 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4143 */
4144 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4145 const u16 *new_id)
4146 {
4147 const u16 *old_id = dev->id;
4148 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4149 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4150
4151 if (dev->class != new_class) {
4152 ata_dev_info(dev, "class mismatch %d != %d\n",
4153 dev->class, new_class);
4154 return 0;
4155 }
4156
4157 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4158 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4159 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4160 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4161
4162 if (strcmp(model[0], model[1])) {
4163 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4164 model[0], model[1]);
4165 return 0;
4166 }
4167
4168 if (strcmp(serial[0], serial[1])) {
4169 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4170 serial[0], serial[1]);
4171 return 0;
4172 }
4173
4174 return 1;
4175 }
4176
4177 /**
4178 * ata_dev_reread_id - Re-read IDENTIFY data
4179 * @dev: target ATA device
4180 * @readid_flags: read ID flags
4181 *
4182 * Re-read IDENTIFY page and make sure @dev is still attached to
4183 * the port.
4184 *
4185 * LOCKING:
4186 * Kernel thread context (may sleep)
4187 *
4188 * RETURNS:
4189 * 0 on success, negative errno otherwise
4190 */
4191 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4192 {
4193 unsigned int class = dev->class;
4194 u16 *id = (void *)dev->link->ap->sector_buf;
4195 int rc;
4196
4197 /* read ID data */
4198 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4199 if (rc)
4200 return rc;
4201
4202 /* is the device still there? */
4203 if (!ata_dev_same_device(dev, class, id))
4204 return -ENODEV;
4205
4206 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4207 return 0;
4208 }
4209
4210 /**
4211 * ata_dev_revalidate - Revalidate ATA device
4212 * @dev: device to revalidate
4213 * @new_class: new class code
4214 * @readid_flags: read ID flags
4215 *
4216 * Re-read IDENTIFY page, make sure @dev is still attached to the
4217 * port and reconfigure it according to the new IDENTIFY page.
4218 *
4219 * LOCKING:
4220 * Kernel thread context (may sleep)
4221 *
4222 * RETURNS:
4223 * 0 on success, negative errno otherwise
4224 */
4225 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4226 unsigned int readid_flags)
4227 {
4228 u64 n_sectors = dev->n_sectors;
4229 u64 n_native_sectors = dev->n_native_sectors;
4230 int rc;
4231
4232 if (!ata_dev_enabled(dev))
4233 return -ENODEV;
4234
4235 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4236 if (ata_class_enabled(new_class) &&
4237 new_class != ATA_DEV_ATA &&
4238 new_class != ATA_DEV_ATAPI &&
4239 new_class != ATA_DEV_ZAC &&
4240 new_class != ATA_DEV_SEMB) {
4241 ata_dev_info(dev, "class mismatch %u != %u\n",
4242 dev->class, new_class);
4243 rc = -ENODEV;
4244 goto fail;
4245 }
4246
4247 /* re-read ID */
4248 rc = ata_dev_reread_id(dev, readid_flags);
4249 if (rc)
4250 goto fail;
4251
4252 /* configure device according to the new ID */
4253 rc = ata_dev_configure(dev);
4254 if (rc)
4255 goto fail;
4256
4257 /* verify n_sectors hasn't changed */
4258 if (dev->class != ATA_DEV_ATA || !n_sectors ||
4259 dev->n_sectors == n_sectors)
4260 return 0;
4261
4262 /* n_sectors has changed */
4263 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4264 (unsigned long long)n_sectors,
4265 (unsigned long long)dev->n_sectors);
4266
4267 /*
4268 * Something could have caused HPA to be unlocked
4269 * involuntarily. If n_native_sectors hasn't changed and the
4270 * new size matches it, keep the device.
4271 */
4272 if (dev->n_native_sectors == n_native_sectors &&
4273 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4274 ata_dev_warn(dev,
4275 "new n_sectors matches native, probably "
4276 "late HPA unlock, n_sectors updated\n");
4277 /* use the larger n_sectors */
4278 return 0;
4279 }
4280
4281 /*
4282 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4283 * unlocking HPA in those cases.
4284 *
4285 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4286 */
4287 if (dev->n_native_sectors == n_native_sectors &&
4288 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4289 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4290 ata_dev_warn(dev,
4291 "old n_sectors matches native, probably "
4292 "late HPA lock, will try to unlock HPA\n");
4293 /* try unlocking HPA */
4294 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4295 rc = -EIO;
4296 } else
4297 rc = -ENODEV;
4298
4299 /* restore original n_[native_]sectors and fail */
4300 dev->n_native_sectors = n_native_sectors;
4301 dev->n_sectors = n_sectors;
4302 fail:
4303 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4304 return rc;
4305 }
4306
4307 struct ata_blacklist_entry {
4308 const char *model_num;
4309 const char *model_rev;
4310 unsigned long horkage;
4311 };
4312
4313 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4314 /* Devices with DMA related problems under Linux */
4315 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4316 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4317 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4318 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4319 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4320 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4321 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4322 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4323 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4324 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4325 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4326 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4327 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4328 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4329 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4330 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4331 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4332 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4333 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4334 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4335 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4336 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4337 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4338 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4339 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4340 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4341 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4342 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4343 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4344 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
4345 /* Odd clown on sil3726/4726 PMPs */
4346 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4347
4348 /* Weird ATAPI devices */
4349 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4350 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4351 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4352 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4353
4354 /*
4355 * Causes silent data corruption with higher max sects.
4356 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4357 */
4358 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4359
4360 /*
4361 * These devices time out with higher max sects.
4362 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4363 */
4364 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4365
4366 /* Devices we expect to fail diagnostics */
4367
4368 /* Devices where NCQ should be avoided */
4369 /* NCQ is slow */
4370 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4371 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4372 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4373 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4374 /* NCQ is broken */
4375 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4376 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4377 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4378 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4379 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4380
4381 /* Seagate NCQ + FLUSH CACHE firmware bug */
4382 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4383 ATA_HORKAGE_FIRMWARE_WARN },
4384
4385 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4386 ATA_HORKAGE_FIRMWARE_WARN },
4387
4388 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4389 ATA_HORKAGE_FIRMWARE_WARN },
4390
4391 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4392 ATA_HORKAGE_FIRMWARE_WARN },
4393
4394 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4395 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4396 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4397 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
4398
4399 /* Blacklist entries taken from Silicon Image 3124/3132
4400 * Windows driver .inf file - also several Linux problem reports */
4401 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4402 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4403 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4404
4405 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4406 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4407
4408 /* devices which puke on READ_NATIVE_MAX */
4409 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4410 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4411 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4412 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4413
4414 /* this one allows HPA unlocking but fails IOs on the area */
4415 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4416
4417 /* Devices which report 1 sector over size HPA */
4418 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4419 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4420 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4421
4422 /* Devices which get the IVB wrong */
4423 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4424 /* Maybe we should just blacklist TSSTcorp... */
4425 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4426
4427 /* Devices that do not need bridging limits applied */
4428 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4429 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4430
4431 /* Devices which aren't very happy with higher link speeds */
4432 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4433 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
4434
4435 /*
4436 * Devices which choke on SETXFER. Applies only if both the
4437 * device and controller are SATA.
4438 */
4439 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4440 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4441 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
4442 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4443 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4444
4445 /* devices that don't properly handle queued TRIM commands */
4446 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4447 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4448 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4449 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4450 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4451 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4452 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4453 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4454 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4455 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4456 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4457 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4458 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4459 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4460
4461 /* devices that don't properly handle TRIM commands */
4462 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4463
4464 /*
4465 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4466 * (Return Zero After Trim) flags in the ATA Command Set are
4467 * unreliable in the sense that they only define what happens if
4468 * the device successfully executed the DSM TRIM command. TRIM
4469 * is only advisory, however, and the device is free to silently
4470 * ignore all or parts of the request.
4471 *
4472 * Whitelist drives that are known to reliably return zeroes
4473 * after TRIM.
4474 */
4475
4476 /*
4477 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4478 * that model before whitelisting all other intel SSDs.
4479 */
4480 { "INTEL*SSDSC2MH*", NULL, 0, },
4481
4482 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4483 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4484 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4485 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4486 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4487 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4488 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4489
4490 /*
4491 * Some WD SATA-I drives spin up and down erratically when the link
4492 * is put into slumber mode. We don't have a full list of the
4493 * affected devices. Disable LPM if the device matches one of the
4494 * known prefixes and is SATA-1. As a side effect LPM partial is
4495 * lost too.
4496 *
4497 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4498 */
4499 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4500 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4501 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4502 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4503 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4504 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4505 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4506
4507 /* End Marker */
4508 { }
4509 };
4510
4511 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4512 {
4513 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4514 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4515 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4516
4517 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4518 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4519
4520 while (ad->model_num) {
4521 if (glob_match(ad->model_num, model_num)) {
4522 if (ad->model_rev == NULL)
4523 return ad->horkage;
4524 if (glob_match(ad->model_rev, model_rev))
4525 return ad->horkage;
4526 }
4527 ad++;
4528 }
4529 return 0;
4530 }
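
/*
 * The patterns in the table above are shell-style globs as handled by
 * glob_match(); for example, "ST3320[68]13AS" matches both
 * "ST3320613AS" and "ST3320813AS", and "Maxtor *" matches any model
 * string beginning with "Maxtor ".
 */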
4531
4532 static int ata_dma_blacklisted(const struct ata_device *dev)
4533 {
4534 /* We don't support polling DMA. Blacklist DMA (and fall back
4535 * to PIO) for ATAPI devices with CDB-intr if the LLDD handles
4536 * interrupts only in the HSM_ST_LAST state.
4537 */
4538 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4539 (dev->flags & ATA_DFLAG_CDB_INTR))
4540 return 1;
4541 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4542 }
4543
4544 /**
4545 * ata_is_40wire - check drive side detection
4546 * @dev: device
4547 *
4548 * Perform drive side detection decoding, allowing for device vendors
4549 * who can't follow the documentation.
4550 */
4551
4552 static int ata_is_40wire(struct ata_device *dev)
4553 {
4554 if (dev->horkage & ATA_HORKAGE_IVB)
4555 return ata_drive_40wire_relaxed(dev->id);
4556 return ata_drive_40wire(dev->id);
4557 }
4558
4559 /**
4560 * cable_is_40wire - 40/80/SATA decider
4561 * @ap: port to consider
4562 *
4563 * This function encapsulates the policy for speed management
4564 * in one place. At the moment we don't cache the result but
4565 * there is a good case for setting ap->cbl to the result when
4566 * we are called with unknown cables (and figuring out if it
4567 * impacts hotplug at all).
4568 *
4569 * Return 1 if the cable appears to be 40 wire.
4570 */
4571
4572 static int cable_is_40wire(struct ata_port *ap)
4573 {
4574 struct ata_link *link;
4575 struct ata_device *dev;
4576
4577 /* If the controller thinks we are 40 wire, we are. */
4578 if (ap->cbl == ATA_CBL_PATA40)
4579 return 1;
4580
4581 /* If the controller thinks we are 80 wire, we are. */
4582 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4583 return 0;
4584
4585 /* If the system is known to use a 40 wire short cable (e.g. a
4586 * laptop), then we allow 80 wire modes even if the drive
4587 * isn't sure.
4588 */
4589 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4590 return 0;
4591
4592 /* If the controller doesn't know, we scan.
4593 *
4594 * Note: We look for all 40 wire detects at this point. Any
4595 * 80 wire detect is taken to be 80 wire cable because
4596 * - in many setups only the one drive (slave if present) will
4597 * give a valid detect
4598 * - if you have a non detect capable drive you don't want it
4599 * to colour the choice
4600 */
4601 ata_for_each_link(link, ap, EDGE) {
4602 ata_for_each_dev(dev, link, ENABLED) {
4603 if (!ata_is_40wire(dev))
4604 return 0;
4605 }
4606 }
4607 return 1;
4608 }
4609
4610 /**
4611 * ata_dev_xfermask - Compute supported xfermask of the given device
4612 * @dev: Device to compute xfermask for
4613 *
4614 * Compute supported xfermask of @dev and store it in
4615 * dev->*_mask. This function is responsible for applying all
4616 * known limits including host controller limits, device
4617 * blacklist, etc...
4618 *
4619 * LOCKING:
4620 * None.
4621 */
4622 static void ata_dev_xfermask(struct ata_device *dev)
4623 {
4624 struct ata_link *link = dev->link;
4625 struct ata_port *ap = link->ap;
4626 struct ata_host *host = ap->host;
4627 unsigned long xfer_mask;
4628
4629 /* controller modes available */
4630 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4631 ap->mwdma_mask, ap->udma_mask);
4632
4633 /* drive modes available */
4634 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4635 dev->mwdma_mask, dev->udma_mask);
4636 xfer_mask &= ata_id_xfermask(dev->id);
4637
4638 /*
4639 * CFA Advanced TrueIDE timings are not allowed on a shared
4640 * cable
4641 */
4642 if (ata_dev_pair(dev)) {
4643 /* No PIO5 or PIO6 */
4644 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4645 /* No MWDMA3 or MWDMA4 */
4646 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4647 }
4648
4649 if (ata_dma_blacklisted(dev)) {
4650 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4651 ata_dev_warn(dev,
4652 "device is on DMA blacklist, disabling DMA\n");
4653 }
4654
4655 if ((host->flags & ATA_HOST_SIMPLEX) &&
4656 host->simplex_claimed && host->simplex_claimed != ap) {
4657 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4658 ata_dev_warn(dev,
4659 "simplex DMA is claimed by other device, disabling DMA\n");
4660 }
4661
4662 if (ap->flags & ATA_FLAG_NO_IORDY)
4663 xfer_mask &= ata_pio_mask_no_iordy(dev);
4664
4665 if (ap->ops->mode_filter)
4666 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4667
4668 /* Apply the cable rule here. Don't apply it early because when
4669 * we handle hotplug the cable type can itself change.
4670 * Check this last so that we know if the transfer rate was
4671 * solely limited by the cable.
4672 * Unknown or 80-wire cables reported host side are checked
4673 * drive side as well. Cases where we know a 40-wire cable
4674 * is used safely for 80 are not checked here.
4675 */
4676 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4677 /* UDMA/44 or higher would be available */
4678 if (cable_is_40wire(ap)) {
4679 ata_dev_warn(dev,
4680 "limited to UDMA/33 due to 40-wire cable\n");
4681 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4682 }
4683
4684 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4685 &dev->mwdma_mask, &dev->udma_mask);
4686 }
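
/*
 * A worked example of the mask arithmetic above, assuming the usual
 * one-bit-per-mode xfer_mask layout with UDMA modes starting at
 * ATA_SHIFT_UDMA: 0xF8 << ATA_SHIFT_UDMA covers UDMA3 and above, so
 * clearing those bits on a 40-wire cable leaves only UDMA0-UDMA2,
 * i.e. caps the device at UDMA/33.
 */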
4687
4688 /**
4689 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4690 * @dev: Device to which command will be sent
4691 *
4692 * Issue SET FEATURES - XFER MODE command to device @dev
4693 * on its port.
4694 *
4695 * LOCKING:
4696 * PCI/etc. bus probe sem.
4697 *
4698 * RETURNS:
4699 * 0 on success, AC_ERR_* mask otherwise.
4700 */
4701
4702 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4703 {
4704 struct ata_taskfile tf;
4705 unsigned int err_mask;
4706
4707 /* set up set-features taskfile */
4708 DPRINTK("set features - xfer mode\n");
4709
4710 /* Some controllers and ATAPI devices show flaky interrupt
4711 * behavior after setting xfer mode. Use polling instead.
4712 */
4713 ata_tf_init(dev, &tf);
4714 tf.command = ATA_CMD_SET_FEATURES;
4715 tf.feature = SETFEATURES_XFER;
4716 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4717 tf.protocol = ATA_PROT_NODATA;
4718 /* If we are using IORDY we must send the mode setting command */
4719 if (ata_pio_need_iordy(dev))
4720 tf.nsect = dev->xfer_mode;
4721 /* If the device has IORDY and the controller does not - turn it off */
4722 else if (ata_id_has_iordy(dev->id))
4723 tf.nsect = 0x01;
4724 else /* In the ancient relic department - skip all of this */
4725 return 0;
4726
4727 /* On some disks, this command causes spin-up, so we need a longer timeout */
4728 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4729
4730 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4731 return err_mask;
4732 }
4733
4734 /**
4735 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4736 * @dev: Device to which command will be sent
4737 * @enable: Whether to enable or disable the feature
4738 * @feature: The sector count value identifying the feature to set
4739 *
4740 * Issue SET FEATURES - SATA FEATURES command to device @dev,
4741 * passing @feature in the sector count field.
4742 *
4743 * LOCKING:
4744 * PCI/etc. bus probe sem.
4745 *
4746 * RETURNS:
4747 * 0 on success, AC_ERR_* mask otherwise.
4748 */
4749 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4750 {
4751 struct ata_taskfile tf;
4752 unsigned int err_mask;
4753 unsigned long timeout = 0;
4754
4755 /* set up set-features taskfile */
4756 DPRINTK("set features - SATA features\n");
4757
4758 ata_tf_init(dev, &tf);
4759 tf.command = ATA_CMD_SET_FEATURES;
4760 tf.feature = enable;
4761 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4762 tf.protocol = ATA_PROT_NODATA;
4763 tf.nsect = feature;
4764
4765 if (enable == SETFEATURES_SPINUP)
4766 timeout = ata_probe_timeout ?
4767 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4768 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4769
4770 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4771 return err_mask;
4772 }
4773 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
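
/*
 * A typical call, shown here only as an illustration of the @enable and
 * @feature arguments (enabling device-initiated power management):
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_DIPM);
 *	if (err_mask && err_mask != AC_ERR_DEV)
 *		ata_dev_warn(dev, "failed to enable DIPM\n");
 */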
4774
4775 /**
4776 * ata_dev_init_params - Issue INIT DEV PARAMS command
4777 * @dev: Device to which command will be sent
4778 * @heads: Number of heads (taskfile parameter)
4779 * @sectors: Number of sectors (taskfile parameter)
4780 *
4781 * LOCKING:
4782 * Kernel thread context (may sleep)
4783 *
4784 * RETURNS:
4785 * 0 on success, AC_ERR_* mask otherwise.
4786 */
4787 static unsigned int ata_dev_init_params(struct ata_device *dev,
4788 u16 heads, u16 sectors)
4789 {
4790 struct ata_taskfile tf;
4791 unsigned int err_mask;
4792
4793 /* Number of sectors per track 1-255. Number of heads 1-16 */
4794 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4795 return AC_ERR_INVALID;
4796
4797 /* set up init dev params taskfile */
4798 DPRINTK("init dev params \n");
4799
4800 ata_tf_init(dev, &tf);
4801 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4802 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4803 tf.protocol = ATA_PROT_NODATA;
4804 tf.nsect = sectors;
4805 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4806
4807 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4808 /* A clean abort indicates an original or just out-of-spec drive;
4809 * continue, since we issue the setup based on the working
4810 * geometry reported by the drive. */
4811 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4812 err_mask = 0;
4813
4814 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4815 return err_mask;
4816 }
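
/*
 * A worked example of the taskfile encoding above: for the common
 * 16-head, 63-sectors-per-track translation, tf.nsect is set to 63 and
 * the low nibble of tf.device to (16 - 1) & 0x0f == 0x0f, i.e. the
 * maximum head number rather than the head count.
 */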
4817
4818 /**
4819 * atapi_check_dma - Check whether ATAPI DMA can be supported
4820 * @qc: Metadata associated with taskfile to check
4821 *
4822 * Allow low-level driver to filter ATA PACKET commands, returning
4823 * a status indicating whether or not it is OK to use DMA for the
4824 * supplied PACKET command.
4825 *
4826 * LOCKING:
4827 * spin_lock_irqsave(host lock)
4828 *
4829 * RETURNS: 0 when ATAPI DMA can be used
4830 * nonzero otherwise
4831 */
4832 int atapi_check_dma(struct ata_queued_cmd *qc)
4833 {
4834 struct ata_port *ap = qc->ap;
4835
4836 /* Don't allow DMA if the length isn't a multiple of 16 bytes.
4837 * Quite a few ATAPI devices choke on such DMA requests.
4838 */
4839 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4840 unlikely(qc->nbytes & 15))
4841 return 1;
4842
4843 if (ap->ops->check_atapi_dma)
4844 return ap->ops->check_atapi_dma(qc);
4845
4846 return 0;
4847 }
4848
4849 /**
4850 * ata_std_qc_defer - Check whether a qc needs to be deferred
4851 * @qc: ATA command in question
4852 *
4853 * Non-NCQ commands cannot run with any other command, NCQ or
4854 * not. As the upper layer only knows the queue depth, we are
4855 * responsible for maintaining exclusion. This function checks
4856 * whether a new command @qc can be issued.
4857 *
4858 * LOCKING:
4859 * spin_lock_irqsave(host lock)
4860 *
4861 * RETURNS:
4862 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4863 */
4864 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4865 {
4866 struct ata_link *link = qc->dev->link;
4867
4868 if (ata_is_ncq(qc->tf.protocol)) {
4869 if (!ata_tag_valid(link->active_tag))
4870 return 0;
4871 } else {
4872 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4873 return 0;
4874 }
4875
4876 return ATA_DEFER_LINK;
4877 }
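
/*
 * The exclusion rule above, summarized (an NCQ command may overlap only
 * with other NCQ commands; a non-NCQ command must run alone):
 *
 *	new qc		may issue when
 *	NCQ		no non-NCQ command is active (active_tag invalid)
 *	non-NCQ		nothing at all is active (active_tag invalid
 *			and sactive == 0)
 */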
4878
4879 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4880
4881 /**
4882 * ata_sg_init - Associate command with scatter-gather table.
4883 * @qc: Command to be associated
4884 * @sg: Scatter-gather table.
4885 * @n_elem: Number of elements in s/g table.
4886 *
4887 * Initialize the data-related elements of queued_cmd @qc
4888 * to point to a scatter-gather table @sg, containing @n_elem
4889 * elements.
4890 *
4891 * LOCKING:
4892 * spin_lock_irqsave(host lock)
4893 */
4894 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4895 unsigned int n_elem)
4896 {
4897 qc->sg = sg;
4898 qc->n_elem = n_elem;
4899 qc->cursg = qc->sg;
4900 }
4901
4902 #ifdef CONFIG_HAS_DMA
4903
4904 /**
4905 * ata_sg_clean - Unmap DMA memory associated with command
4906 * @qc: Command containing DMA memory to be released
4907 *
4908 * Unmap all mapped DMA memory associated with this command.
4909 *
4910 * LOCKING:
4911 * spin_lock_irqsave(host lock)
4912 */
4913 void ata_sg_clean(struct ata_queued_cmd *qc)
4914 {
4915 struct ata_port *ap = qc->ap;
4916 struct scatterlist *sg = qc->sg;
4917 int dir = qc->dma_dir;
4918
4919 WARN_ON_ONCE(sg == NULL);
4920
4921 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4922
4923 if (qc->n_elem)
4924 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4925
4926 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4927 qc->sg = NULL;
4928 }
4929
4930 /**
4931 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4932 * @qc: Command with scatter-gather table to be mapped.
4933 *
4934 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4935 *
4936 * LOCKING:
4937 * spin_lock_irqsave(host lock)
4938 *
4939 * RETURNS:
4940 * Zero on success, negative on error.
4941 *
4942 */
4943 static int ata_sg_setup(struct ata_queued_cmd *qc)
4944 {
4945 struct ata_port *ap = qc->ap;
4946 unsigned int n_elem;
4947
4948 VPRINTK("ENTER, ata%u\n", ap->print_id);
4949
4950 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4951 if (n_elem < 1)
4952 return -1;
4953
4954 DPRINTK("%d sg elements mapped\n", n_elem);
4955 qc->orig_n_elem = qc->n_elem;
4956 qc->n_elem = n_elem;
4957 qc->flags |= ATA_QCFLAG_DMAMAP;
4958
4959 return 0;
4960 }
4961
4962 #else /* !CONFIG_HAS_DMA */
4963
4964 static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4965 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4966
4967 #endif /* !CONFIG_HAS_DMA */
4968
4969 /**
4970 * swap_buf_le16 - swap halves of 16-bit words in place
4971 * @buf: Buffer to swap
4972 * @buf_words: Number of 16-bit words in buffer.
4973 *
4974 * Swap halves of 16-bit words if needed to convert from
4975 * little-endian byte order to native cpu byte order, or
4976 * vice-versa.
4977 *
4978 * LOCKING:
4979 * Inherited from caller.
4980 */
4981 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4982 {
4983 #ifdef __BIG_ENDIAN
4984 unsigned int i;
4985
4986 for (i = 0; i < buf_words; i++)
4987 buf[i] = le16_to_cpu(buf[i]);
4988 #endif /* __BIG_ENDIAN */
4989 }
4990
4991 /**
4992 * ata_qc_new_init - Request an available ATA command, and initialize it
4993 * @dev: Device from whom we request an available command structure
4994 * @tag: tag to allocate the command under
4995 *
4996 * LOCKING:
4997 * None.
4998 */
4999
5000 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
5001 {
5002 struct ata_port *ap = dev->link->ap;
5003 struct ata_queued_cmd *qc;
5004
5005 /* no command while frozen */
5006 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5007 return NULL;
5008
5009 /* libsas case */
5010 if (ap->flags & ATA_FLAG_SAS_HOST) {
5011 tag = ata_sas_allocate_tag(ap);
5012 if (tag < 0)
5013 return NULL;
5014 }
5015
5016 qc = __ata_qc_from_tag(ap, tag);
5017 qc->tag = tag;
5018 qc->scsicmd = NULL;
5019 qc->ap = ap;
5020 qc->dev = dev;
5021
5022 ata_qc_reinit(qc);
5023
5024 return qc;
5025 }
5026
5027 /**
5028 * ata_qc_free - free unused ata_queued_cmd
5029 * @qc: Command to complete
5030 *
5031 * Free an unused ata_queued_cmd object when something
5032 * prevents it from being issued.
5033 *
5034 * LOCKING:
5035 * spin_lock_irqsave(host lock)
5036 */
5037 void ata_qc_free(struct ata_queued_cmd *qc)
5038 {
5039 struct ata_port *ap;
5040 unsigned int tag;
5041
5042 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5043 ap = qc->ap;
5044
5045 qc->flags = 0;
5046 tag = qc->tag;
5047 if (likely(ata_tag_valid(tag))) {
5048 qc->tag = ATA_TAG_POISON;
5049 if (ap->flags & ATA_FLAG_SAS_HOST)
5050 ata_sas_free_tag(tag, ap);
5051 }
5052 }
5053
5054 void __ata_qc_complete(struct ata_queued_cmd *qc)
5055 {
5056 struct ata_port *ap;
5057 struct ata_link *link;
5058
5059 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5060 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
5061 ap = qc->ap;
5062 link = qc->dev->link;
5063
5064 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5065 ata_sg_clean(qc);
5066
5067 /* command should be marked inactive atomically with qc completion */
5068 if (ata_is_ncq(qc->tf.protocol)) {
5069 link->sactive &= ~(1 << qc->tag);
5070 if (!link->sactive)
5071 ap->nr_active_links--;
5072 } else {
5073 link->active_tag = ATA_TAG_POISON;
5074 ap->nr_active_links--;
5075 }
5076
5077 /* clear exclusive status */
5078 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5079 ap->excl_link == link))
5080 ap->excl_link = NULL;
5081
5082 /* atapi: mark qc as inactive to prevent the interrupt handler
5083 * from completing the command twice later, before the error handler
5084 * is called. (when rc != 0 and atapi request sense is needed)
5085 */
5086 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5087 ap->qc_active &= ~(1 << qc->tag);
5088
5089 /* call completion callback */
5090 qc->complete_fn(qc);
5091 }
5092
5093 static void fill_result_tf(struct ata_queued_cmd *qc)
5094 {
5095 struct ata_port *ap = qc->ap;
5096
5097 qc->result_tf.flags = qc->tf.flags;
5098 ap->ops->qc_fill_rtf(qc);
5099 }
5100
5101 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5102 {
5103 struct ata_device *dev = qc->dev;
5104
5105 if (!ata_is_data(qc->tf.protocol))
5106 return;
5107
5108 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5109 return;
5110
5111 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5112 }
5113
5114 /**
5115 * ata_qc_complete - Complete an active ATA command
5116 * @qc: Command to complete
5117 *
5118 * Indicate to the mid and upper layers that an ATA command has
5119 * completed, with either an ok or not-ok status.
5120 *
5121 * Refrain from calling this function multiple times when
5122 * successfully completing multiple NCQ commands.
5123 * ata_qc_complete_multiple() should be used instead, which will
5124 * properly update IRQ expect state.
5125 *
5126 * LOCKING:
5127 * spin_lock_irqsave(host lock)
5128 */
5129 void ata_qc_complete(struct ata_queued_cmd *qc)
5130 {
5131 struct ata_port *ap = qc->ap;
5132
5133 /* Trigger the LED (if available) */
5134 ledtrig_disk_activity();
5135
5136 /* XXX: New EH and old EH use different mechanisms to
5137 * synchronize EH with regular execution path.
5138 *
5139 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5140 * Normal execution path is responsible for not accessing a
5141 * failed qc. libata core enforces the rule by returning NULL
5142 * from ata_qc_from_tag() for failed qcs.
5143 *
5144 * Old EH depends on ata_qc_complete() nullifying completion
5145 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5146 * not synchronize with interrupt handler. Only PIO task is
5147 * taken care of.
5148 */
5149 if (ap->ops->error_handler) {
5150 struct ata_device *dev = qc->dev;
5151 struct ata_eh_info *ehi = &dev->link->eh_info;
5152
5153 if (unlikely(qc->err_mask))
5154 qc->flags |= ATA_QCFLAG_FAILED;
5155
5156 /*
5157 * Finish internal commands without any further processing
5158 * and always with the result TF filled.
5159 */
5160 if (unlikely(ata_tag_internal(qc->tag))) {
5161 fill_result_tf(qc);
5162 trace_ata_qc_complete_internal(qc);
5163 __ata_qc_complete(qc);
5164 return;
5165 }
5166
5167 /*
5168 * Non-internal qc has failed. Fill the result TF and
5169 * summon EH.
5170 */
5171 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5172 fill_result_tf(qc);
5173 trace_ata_qc_complete_failed(qc);
5174 ata_qc_schedule_eh(qc);
5175 return;
5176 }
5177
5178 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5179
5180 /* read result TF if requested */
5181 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5182 fill_result_tf(qc);
5183
5184 trace_ata_qc_complete_done(qc);
5185 /* Some commands need post-processing after successful
5186 * completion.
5187 */
5188 switch (qc->tf.command) {
5189 case ATA_CMD_SET_FEATURES:
5190 if (qc->tf.feature != SETFEATURES_WC_ON &&
5191 qc->tf.feature != SETFEATURES_WC_OFF &&
5192 qc->tf.feature != SETFEATURES_RA_ON &&
5193 qc->tf.feature != SETFEATURES_RA_OFF)
5194 break;
5195 /* fall through */
5196 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5197 case ATA_CMD_SET_MULTI: /* multi_count changed */
5198 /* revalidate device */
5199 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5200 ata_port_schedule_eh(ap);
5201 break;
5202
5203 case ATA_CMD_SLEEP:
5204 dev->flags |= ATA_DFLAG_SLEEPING;
5205 break;
5206 }
5207
5208 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5209 ata_verify_xfer(qc);
5210
5211 __ata_qc_complete(qc);
5212 } else {
5213 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5214 return;
5215
5216 /* read result TF if failed or requested */
5217 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5218 fill_result_tf(qc);
5219
5220 __ata_qc_complete(qc);
5221 }
5222 }
5223
5224 /**
5225 * ata_qc_complete_multiple - Complete multiple qcs successfully
5226 * @ap: port in question
5227 * @qc_active: new qc_active mask
5228 *
5229 * Complete in-flight commands. This function is meant to be
5230 * called from a low-level driver's interrupt routine to complete
5231 * requests normally. ap->qc_active and @qc_active are compared
5232 * and commands are completed accordingly.
5233 *
5234 * Always use this function when completing multiple NCQ commands
5235 * from IRQ handlers instead of calling ata_qc_complete()
5236 * multiple times to keep IRQ expect status properly in sync.
5237 *
5238 * LOCKING:
5239 * spin_lock_irqsave(host lock)
5240 *
5241 * RETURNS:
5242 * Number of completed commands on success, -errno otherwise.
5243 */
5244 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5245 {
5246 int nr_done = 0;
5247 u32 done_mask;
5248
5249 done_mask = ap->qc_active ^ qc_active;
5250
5251 if (unlikely(done_mask & qc_active)) {
5252 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5253 ap->qc_active, qc_active);
5254 return -EINVAL;
5255 }
5256
5257 while (done_mask) {
5258 struct ata_queued_cmd *qc;
5259 unsigned int tag = __ffs(done_mask);
5260
5261 qc = ata_qc_from_tag(ap, tag);
5262 if (qc) {
5263 ata_qc_complete(qc);
5264 nr_done++;
5265 }
5266 done_mask &= ~(1 << tag);
5267 }
5268
5269 return nr_done;
5270 }
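
/*
 * A worked example of the mask handling above: if tags 0-2 were in
 * flight (ap->qc_active == 0x7) and the controller now reports only
 * tag 0 as active (@qc_active == 0x1), done_mask becomes 0x6 and tags
 * 1 and 2 are completed. A bit set in @qc_active that was never set
 * in ap->qc_active would make done_mask overlap @qc_active, which is
 * the illegal transition rejected above.
 */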
5271
5272 /**
5273 * ata_qc_issue - issue taskfile to device
5274 * @qc: command to issue to device
5275 *
5276 * Prepare an ATA command for submission to the device.
5277 * This includes mapping the data into a DMA-able
5278 * area, filling in the S/G table, and finally
5279 * writing the taskfile to hardware, starting the command.
5280 *
5281 * LOCKING:
5282 * spin_lock_irqsave(host lock)
5283 */
5284 void ata_qc_issue(struct ata_queued_cmd *qc)
5285 {
5286 struct ata_port *ap = qc->ap;
5287 struct ata_link *link = qc->dev->link;
5288 u8 prot = qc->tf.protocol;
5289
5290 /* Make sure only one non-NCQ command is outstanding. The
5291 * check is skipped for old EH because it reuses the active qc to
5292 * request ATAPI sense.
5293 */
5294 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5295
5296 if (ata_is_ncq(prot)) {
5297 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5298
5299 if (!link->sactive)
5300 ap->nr_active_links++;
5301 link->sactive |= 1 << qc->tag;
5302 } else {
5303 WARN_ON_ONCE(link->sactive);
5304
5305 ap->nr_active_links++;
5306 link->active_tag = qc->tag;
5307 }
5308
5309 qc->flags |= ATA_QCFLAG_ACTIVE;
5310 ap->qc_active |= 1 << qc->tag;
5311
5312 /*
5313 * We guarantee to LLDs that they will have at least one
5314 * non-zero sg if the command is a data command.
5315 */
5316 if (WARN_ON_ONCE(ata_is_data(prot) &&
5317 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5318 goto sys_err;
5319
5320 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5321 (ap->flags & ATA_FLAG_PIO_DMA)))
5322 if (ata_sg_setup(qc))
5323 goto sys_err;
5324
5325 /* if device is sleeping, schedule reset and abort the link */
5326 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5327 link->eh_info.action |= ATA_EH_RESET;
5328 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5329 ata_link_abort(link);
5330 return;
5331 }
5332
5333 ap->ops->qc_prep(qc);
5334 trace_ata_qc_issue(qc);
5335 qc->err_mask |= ap->ops->qc_issue(qc);
5336 if (unlikely(qc->err_mask))
5337 goto err;
5338 return;
5339
5340 sys_err:
5341 qc->err_mask |= AC_ERR_SYSTEM;
5342 err:
5343 ata_qc_complete(qc);
5344 }
5345
5346 /**
5347 * sata_scr_valid - test whether SCRs are accessible
5348 * @link: ATA link to test SCR accessibility for
5349 *
5350 * Test whether SCRs are accessible for @link.
5351 *
5352 * LOCKING:
5353 * None.
5354 *
5355 * RETURNS:
5356 * 1 if SCRs are accessible, 0 otherwise.
5357 */
5358 int sata_scr_valid(struct ata_link *link)
5359 {
5360 struct ata_port *ap = link->ap;
5361
5362 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5363 }
5364
5365 /**
5366 * sata_scr_read - read SCR register of the specified port
5367 * @link: ATA link to read SCR for
5368 * @reg: SCR to read
5369 * @val: Place to store read value
5370 *
5371 * Read SCR register @reg of @link into *@val. This function is
5372 * guaranteed to succeed if @link is ap->link, the cable type of
5373 * the port is SATA and the port implements ->scr_read.
5374 *
5375 * LOCKING:
5376 * None if @link is ap->link. Kernel thread context otherwise.
5377 *
5378 * RETURNS:
5379 * 0 on success, negative errno on failure.
5380 */
5381 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5382 {
5383 if (ata_is_host_link(link)) {
5384 if (sata_scr_valid(link))
5385 return link->ap->ops->scr_read(link, reg, val);
5386 return -EOPNOTSUPP;
5387 }
5388
5389 return sata_pmp_scr_read(link, reg, val);
5390 }
5391
5392 /**
5393 * sata_scr_write - write SCR register of the specified port
5394 * @link: ATA link to write SCR for
5395 * @reg: SCR to write
5396 * @val: value to write
5397 *
5398 * Write @val to SCR register @reg of @link. This function is
5399 * guaranteed to succeed if @link is ap->link, the cable type of
5400 * the port is SATA and the port implements ->scr_write.
5401 *
5402 * LOCKING:
5403 * None if @link is ap->link. Kernel thread context otherwise.
5404 *
5405 * RETURNS:
5406 * 0 on success, negative errno on failure.
5407 */
5408 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5409 {
5410 if (ata_is_host_link(link)) {
5411 if (sata_scr_valid(link))
5412 return link->ap->ops->scr_write(link, reg, val);
5413 return -EOPNOTSUPP;
5414 }
5415
5416 return sata_pmp_scr_write(link, reg, val);
5417 }
5418
5419 /**
5420 * sata_scr_write_flush - write SCR register of the specified port and flush
5421 * @link: ATA link to write SCR for
5422 * @reg: SCR to write
5423 * @val: value to write
5424 *
5425 * This function is identical to sata_scr_write() except that it
5426 * performs a flush after writing to the register.
5427 *
5428 * LOCKING:
5429 * None if @link is ap->link. Kernel thread context otherwise.
5430 *
5431 * RETURNS:
5432 * 0 on success, negative errno on failure.
5433 */
5434 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5435 {
5436 if (ata_is_host_link(link)) {
5437 int rc;
5438
5439 if (sata_scr_valid(link)) {
5440 rc = link->ap->ops->scr_write(link, reg, val);
5441 if (rc == 0)
5442 rc = link->ap->ops->scr_read(link, reg, &val);
5443 return rc;
5444 }
5445 return -EOPNOTSUPP;
5446 }
5447
5448 return sata_pmp_scr_write(link, reg, val);
5449 }
5450
5451 /**
5452 * ata_phys_link_online - test whether the given link is online
5453 * @link: ATA link to test
5454 *
5455 * Test whether @link is online. Note that this function returns
5456 * %false if the online status of @link cannot be obtained, so
5457 * ata_link_online(link) != !ata_link_offline(link).
5458 *
5459 * LOCKING:
5460 * None.
5461 *
5462 * RETURNS:
5463 * True if the port online status is available and online.
5464 */
5465 bool ata_phys_link_online(struct ata_link *link)
5466 {
5467 u32 sstatus;
5468
5469 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5470 ata_sstatus_online(sstatus))
5471 return true;
5472 return false;
5473 }
5474
5475 /**
5476 * ata_phys_link_offline - test whether the given link is offline
5477 * @link: ATA link to test
5478 *
5479 * Test whether @link is offline. Note that this function
5480 * returns %false if the offline status of @link cannot be obtained, so
5481 * ata_link_online(link) != !ata_link_offline(link).
5482 *
5483 * LOCKING:
5484 * None.
5485 *
5486 * RETURNS:
5487 * True if the port offline status is available and offline.
5488 */
5489 bool ata_phys_link_offline(struct ata_link *link)
5490 {
5491 u32 sstatus;
5492
5493 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5494 !ata_sstatus_online(sstatus))
5495 return true;
5496 return false;
5497 }
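
/*
 * Note the asymmetry spelled out above: when the SCRs cannot be read,
 * both helpers return false, so a link can be neither "online" nor
 * "offline". For example, on a port without SCR access
 * sata_scr_read() fails and both tests report false.
 */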
5498
5499 /**
5500 * ata_link_online - test whether the given link is online
5501 * @link: ATA link to test
5502 *
5503 * Test whether @link is online. This is identical to
5504 * ata_phys_link_online() when there's no slave link. When
5505 * there's a slave link, this function should only be called on
5506 * the master link and will return true if either of the M/S links is
5507 * online.
5508 *
5509 * LOCKING:
5510 * None.
5511 *
5512 * RETURNS:
5513 * True if the port online status is available and online.
5514 */
5515 bool ata_link_online(struct ata_link *link)
5516 {
5517 struct ata_link *slave = link->ap->slave_link;
5518
5519 WARN_ON(link == slave); /* shouldn't be called on slave link */
5520
5521 return ata_phys_link_online(link) ||
5522 (slave && ata_phys_link_online(slave));
5523 }
5524
5525 /**
5526 * ata_link_offline - test whether the given link is offline
5527 * @link: ATA link to test
5528 *
5529 * Test whether @link is offline. This is identical to
5530 * ata_phys_link_offline() when there's no slave link. When
5531 * there's a slave link, this function should only be called on
5532 * the master link and will return true if both M/S links are
5533 * offline.
5534 *
5535 * LOCKING:
5536 * None.
5537 *
5538 * RETURNS:
5539 * True if the port offline status is available and offline.
5540 */
5541 bool ata_link_offline(struct ata_link *link)
5542 {
5543 struct ata_link *slave = link->ap->slave_link;
5544
5545 WARN_ON(link == slave); /* shouldn't be called on slave link */
5546
5547 return ata_phys_link_offline(link) &&
5548 (!slave || ata_phys_link_offline(slave));
5549 }
5550
5551 #ifdef CONFIG_PM
5552 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5553 unsigned int action, unsigned int ehi_flags,
5554 bool async)
5555 {
5556 struct ata_link *link;
5557 unsigned long flags;
5558
5559 /* A previous resume operation might still be in
5560 * progress. Wait for PM_PENDING to clear.
5561 */
5562 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5563 ata_port_wait_eh(ap);
5564 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5565 }
5566
5567 /* request PM ops to EH */
5568 spin_lock_irqsave(ap->lock, flags);
5569
5570 ap->pm_mesg = mesg;
5571 ap->pflags |= ATA_PFLAG_PM_PENDING;
5572 ata_for_each_link(link, ap, HOST_FIRST) {
5573 link->eh_info.action |= action;
5574 link->eh_info.flags |= ehi_flags;
5575 }
5576
5577 ata_port_schedule_eh(ap);
5578
5579 spin_unlock_irqrestore(ap->lock, flags);
5580
5581 if (!async) {
5582 ata_port_wait_eh(ap);
5583 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5584 }
5585 }
5586
5587 /*
5588 * On some hardware, the device fails to respond after being spun down for
5589 * suspend. As the device won't be used before being resumed, we don't need
5590 * to touch it. Ask EH to skip the usual stuff and proceed directly to suspend.
5591 *
5592 * http://thread.gmane.org/gmane.linux.ide/46764
5593 */
5594 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5595 | ATA_EHI_NO_AUTOPSY
5596 | ATA_EHI_NO_RECOVERY;
5597
5598 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5599 {
5600 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5601 }
5602
5603 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5604 {
5605 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5606 }
5607
5608 static int ata_port_pm_suspend(struct device *dev)
5609 {
5610 struct ata_port *ap = to_ata_port(dev);
5611
5612 if (pm_runtime_suspended(dev))
5613 return 0;
5614
5615 ata_port_suspend(ap, PMSG_SUSPEND);
5616 return 0;
5617 }
5618
5619 static int ata_port_pm_freeze(struct device *dev)
5620 {
5621 struct ata_port *ap = to_ata_port(dev);
5622
5623 if (pm_runtime_suspended(dev))
5624 return 0;
5625
5626 ata_port_suspend(ap, PMSG_FREEZE);
5627 return 0;
5628 }
5629
5630 static int ata_port_pm_poweroff(struct device *dev)
5631 {
5632 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5633 return 0;
5634 }
5635
5636 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5637 | ATA_EHI_QUIET;
5638
5639 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5640 {
5641 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5642 }
5643
5644 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5645 {
5646 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5647 }
5648
5649 static int ata_port_pm_resume(struct device *dev)
5650 {
5651 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5652 pm_runtime_disable(dev);
5653 pm_runtime_set_active(dev);
5654 pm_runtime_enable(dev);
5655 return 0;
5656 }
5657
5658 /*
5659 * For ODDs, the upper layer will poll for media change every few seconds,
5660 * which will make it enter and leave the suspend state every few seconds.
5661 * As each suspend causes a hard/soft reset, the gain from runtime suspend
5662 * is minimal, and the ODD may malfunction after being reset constantly.
5663 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5664 * ODD is attached to the port.
5665 */
5666 static int ata_port_runtime_idle(struct device *dev)
5667 {
5668 struct ata_port *ap = to_ata_port(dev);
5669 struct ata_link *link;
5670 struct ata_device *adev;
5671
5672 ata_for_each_link(link, ap, HOST_FIRST) {
5673 ata_for_each_dev(adev, link, ENABLED)
5674 if (adev->class == ATA_DEV_ATAPI &&
5675 !zpodd_dev_enabled(adev))
5676 return -EBUSY;
5677 }
5678
5679 return 0;
5680 }
5681
5682 static int ata_port_runtime_suspend(struct device *dev)
5683 {
5684 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5685 return 0;
5686 }
5687
5688 static int ata_port_runtime_resume(struct device *dev)
5689 {
5690 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5691 return 0;
5692 }
5693
5694 static const struct dev_pm_ops ata_port_pm_ops = {
5695 .suspend = ata_port_pm_suspend,
5696 .resume = ata_port_pm_resume,
5697 .freeze = ata_port_pm_freeze,
5698 .thaw = ata_port_pm_resume,
5699 .poweroff = ata_port_pm_poweroff,
5700 .restore = ata_port_pm_resume,
5701
5702 .runtime_suspend = ata_port_runtime_suspend,
5703 .runtime_resume = ata_port_runtime_resume,
5704 .runtime_idle = ata_port_runtime_idle,
5705 };
5706
5707 /* sas ports don't participate in pm runtime management of ata_ports,
5708 * and need to resume ata devices at the domain level, not the per-port
5709 * level. sas suspend/resume is async to allow parallel port recovery
5710 * since sas has multiple ata_port instances per Scsi_Host.
5711 */
5712 void ata_sas_port_suspend(struct ata_port *ap)
5713 {
5714 ata_port_suspend_async(ap, PMSG_SUSPEND);
5715 }
5716 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5717
5718 void ata_sas_port_resume(struct ata_port *ap)
5719 {
5720 ata_port_resume_async(ap, PMSG_RESUME);
5721 }
5722 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5723
5724 /**
5725 * ata_host_suspend - suspend host
5726 * @host: host to suspend
5727 * @mesg: PM message
5728 *
5729 * Suspend @host. The actual operation is performed by port suspend.
5730 */
5731 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5732 {
5733 host->dev->power.power_state = mesg;
5734 return 0;
5735 }
5736
5737 /**
5738 * ata_host_resume - resume host
5739 * @host: host to resume
5740 *
5741 * Resume @host. The actual operation is performed by port resume.
5742 */
5743 void ata_host_resume(struct ata_host *host)
5744 {
5745 host->dev->power.power_state = PMSG_ON;
5746 }
5747 #endif
5748
5749 struct device_type ata_port_type = {
5750 .name = "ata_port",
5751 #ifdef CONFIG_PM
5752 .pm = &ata_port_pm_ops,
5753 #endif
5754 };
5755
5756 /**
5757 * ata_dev_init - Initialize an ata_device structure
5758 * @dev: Device structure to initialize
5759 *
5760 * Initialize @dev in preparation for probing.
5761 *
5762 * LOCKING:
5763 * Inherited from caller.
5764 */
5765 void ata_dev_init(struct ata_device *dev)
5766 {
5767 struct ata_link *link = ata_dev_phys_link(dev);
5768 struct ata_port *ap = link->ap;
5769 unsigned long flags;
5770
5771 /* SATA spd limit is bound to the attached device, reset together */
5772 link->sata_spd_limit = link->hw_sata_spd_limit;
5773 link->sata_spd = 0;
5774
5775 /* High bits of dev->flags are used to record warm plug
5776 * requests which occur asynchronously. Synchronize using
5777 * host lock.
5778 */
5779 spin_lock_irqsave(ap->lock, flags);
5780 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5781 dev->horkage = 0;
5782 spin_unlock_irqrestore(ap->lock, flags);
5783
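/* zero only the fields between ATA_DEVICE_CLEAR_BEGIN and
 * ATA_DEVICE_CLEAR_END; everything outside this window survives re-probing
 */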
5784 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5785 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5786 dev->pio_mask = UINT_MAX;
5787 dev->mwdma_mask = UINT_MAX;
5788 dev->udma_mask = UINT_MAX;
5789 }
5790
5791 /**
5792 * ata_link_init - Initialize an ata_link structure
5793 * @ap: ATA port link is attached to
5794 * @link: Link structure to initialize
5795 * @pmp: Port multiplier port number
5796 *
5797 * Initialize @link.
5798 *
5799 * LOCKING:
5800 * Kernel thread context (may sleep)
5801 */
5802 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5803 {
5804 int i;
5805
5806 /* clear everything except for devices */
5807 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5808 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5809
5810 link->ap = ap;
5811 link->pmp = pmp;
5812 link->active_tag = ATA_TAG_POISON;
5813 link->hw_sata_spd_limit = UINT_MAX;
5814
5815 /* can't use iterator, ap isn't initialized yet */
5816 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5817 struct ata_device *dev = &link->device[i];
5818
5819 dev->link = link;
5820 dev->devno = dev - link->device;
5821 #ifdef CONFIG_ATA_ACPI
5822 dev->gtf_filter = ata_acpi_gtf_filter;
5823 #endif
5824 ata_dev_init(dev);
5825 }
5826 }
5827
5828 /**
5829 * sata_link_init_spd - Initialize link->sata_spd_limit
5830 * @link: Link to configure sata_spd_limit for
5831 *
5832 * Initialize @link->[hw_]sata_spd_limit to the currently
5833 * configured value.
5834 *
5835 * LOCKING:
5836 * Kernel thread context (may sleep).
5837 *
5838 * RETURNS:
5839 * 0 on success, -errno on failure.
5840 */
5841 int sata_link_init_spd(struct ata_link *link)
5842 {
5843 u8 spd;
5844 int rc;
5845
5846 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5847 if (rc)
5848 return rc;
5849
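/* the SPD limit lives in SControl bits 7:4; 0 means no speed restriction */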
5850 spd = (link->saved_scontrol >> 4) & 0xf;
5851 if (spd)
5852 link->hw_sata_spd_limit &= (1 << spd) - 1;
5853
5854 ata_force_link_limits(link);
5855
5856 link->sata_spd_limit = link->hw_sata_spd_limit;
5857
5858 return 0;
5859 }
5860
5861 /**
5862 * ata_port_alloc - allocate and initialize basic ATA port resources
5863 * @host: ATA host this allocated port belongs to
5864 *
5865 * Allocate and initialize basic ATA port resources.
5866 *
5867 * RETURNS:
5868 * Allocated ATA port on success, NULL on failure.
5869 *
5870 * LOCKING:
5871 * Inherited from calling layer (may sleep).
5872 */
5873 struct ata_port *ata_port_alloc(struct ata_host *host)
5874 {
5875 struct ata_port *ap;
5876
5877 DPRINTK("ENTER\n");
5878
5879 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5880 if (!ap)
5881 return NULL;
5882
5883 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5884 ap->lock = &host->lock;
5885 ap->print_id = -1;
5886 ap->local_port_no = -1;
5887 ap->host = host;
5888 ap->dev = host->dev;
5889
5890 #if defined(ATA_VERBOSE_DEBUG)
5891 /* turn on all debugging levels */
5892 ap->msg_enable = 0x00FF;
5893 #elif defined(ATA_DEBUG)
5894 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5895 #else
5896 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5897 #endif
5898
5899 mutex_init(&ap->scsi_scan_mutex);
5900 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5901 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5902 INIT_LIST_HEAD(&ap->eh_done_q);
5903 init_waitqueue_head(&ap->eh_wait_q);
5904 init_completion(&ap->park_req_pending);
5905 init_timer_deferrable(&ap->fastdrain_timer);
5906 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5907 ap->fastdrain_timer.data = (unsigned long)ap;
5908
5909 ap->cbl = ATA_CBL_NONE;
5910
5911 ata_link_init(ap, &ap->link, 0);
5912
5913 #ifdef ATA_IRQ_TRAP
5914 ap->stats.unhandled_irq = 1;
5915 ap->stats.idle_irq = 1;
5916 #endif
5917 ata_sff_port_init(ap);
5918
5919 return ap;
5920 }
5921
5922 static void ata_host_release(struct device *gendev, void *res)
5923 {
5924 struct ata_host *host = dev_get_drvdata(gendev);
5925 int i;
5926
5927 for (i = 0; i < host->n_ports; i++) {
5928 struct ata_port *ap = host->ports[i];
5929
5930 if (!ap)
5931 continue;
5932
5933 if (ap->scsi_host)
5934 scsi_host_put(ap->scsi_host);
5935
5936 kfree(ap->pmp_link);
5937 kfree(ap->slave_link);
5938 kfree(ap);
5939 host->ports[i] = NULL;
5940 }
5941
5942 dev_set_drvdata(gendev, NULL);
5943 }
5944
5945 /**
5946 * ata_host_alloc - allocate and init basic ATA host resources
5947 * @dev: generic device this host is associated with
5948 * @max_ports: maximum number of ATA ports associated with this host
5949 *
5950 * Allocate and initialize basic ATA host resources. An LLD calls
5951 * this function to allocate a host, initializes it fully, and
5952 * attaches it using ata_host_register().
5953 *
5954 * @max_ports ports are allocated and host->n_ports is
5955 * initialized to @max_ports. The caller is allowed to decrease
5956 * host->n_ports before calling ata_host_register(). The unused
5957 * ports will be automatically freed on registration.
5958 *
5959 * RETURNS:
5960 * Allocated ATA host on success, NULL on failure.
5961 *
5962 * LOCKING:
5963 * Inherited from calling layer (may sleep).
5964 */
5965 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5966 {
5967 struct ata_host *host;
5968 size_t sz;
5969 int i;
5970
5971 DPRINTK("ENTER\n");
5972
5973 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5974 return NULL;
5975
5976 /* alloc a container for our list of ATA ports (buses) */
5977 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5979 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5980 if (!host)
5981 goto err_out;
5982
5983 devres_add(dev, host);
5984 dev_set_drvdata(dev, host);
5985
5986 spin_lock_init(&host->lock);
5987 mutex_init(&host->eh_mutex);
5988 host->dev = dev;
5989 host->n_ports = max_ports;
5990
5991 /* allocate ports bound to this host */
5992 for (i = 0; i < max_ports; i++) {
5993 struct ata_port *ap;
5994
5995 ap = ata_port_alloc(host);
5996 if (!ap)
5997 goto err_out;
5998
5999 ap->port_no = i;
6000 host->ports[i] = ap;
6001 }
6002
6003 devres_remove_group(dev, NULL);
6004 return host;
6005
6006 err_out:
6007 devres_release_group(dev, NULL);
6008 return NULL;
6009 }
6010
6011 /**
6012 * ata_host_alloc_pinfo - alloc host and init with port_info array
6013 * @dev: generic device this host is associated with
6014 * @ppi: array of ATA port_info to initialize host with
6015 * @n_ports: number of ATA ports attached to this host
6016 *
6017 * Allocate an ATA host and initialize it with info from @ppi. If
6018 * NULL-terminated, @ppi may contain fewer entries than @n_ports; the
6019 * last entry will then be used for the remaining ports.
6020 *
6021 * RETURNS:
6022 * Allocated ATA host on success, NULL on failure.
6023 *
6024 * LOCKING:
6025 * Inherited from calling layer (may sleep).
6026 */
6027 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6028 const struct ata_port_info * const * ppi,
6029 int n_ports)
6030 {
6031 const struct ata_port_info *pi;
6032 struct ata_host *host;
6033 int i, j;
6034
6035 host = ata_host_alloc(dev, n_ports);
6036 if (!host)
6037 return NULL;
6038
6039 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6040 struct ata_port *ap = host->ports[i];
6041
6042 if (ppi[j])
6043 pi = ppi[j++];
6044
6045 ap->pio_mask = pi->pio_mask;
6046 ap->mwdma_mask = pi->mwdma_mask;
6047 ap->udma_mask = pi->udma_mask;
6048 ap->flags |= pi->flags;
6049 ap->link.flags |= pi->link_flags;
6050 ap->ops = pi->port_ops;
6051
6052 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6053 host->ops = pi->port_ops;
6054 }
6055
6056 return host;
6057 }
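
/*
 * Illustrative usage sketch (the port_info values are hypothetical, not
 * taken from any particular driver): with a NULL-terminated @ppi, one
 * entry can describe all remaining ports.
 *
 *	static const struct ata_port_info my_pinfo = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &sata_port_ops,
 *	};
 *	static const struct ata_port_info *ppi[] = { &my_pinfo, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 *
 * Here both ports inherit my_pinfo because the array ends after one entry.
 */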
6058
6059 /**
6060 * ata_slave_link_init - initialize slave link
6061 * @ap: port to initialize slave link for
6062 *
6063 * Create and initialize slave link for @ap. This enables slave
6064 * link handling on the port.
6065 *
6066 * In libata, a port contains links and a link contains devices.
6067 * There is a single host link, but if a PMP is attached to it,
6068 * there can be multiple fan-out links. On SATA, there is usually
6069 * a single device connected to a link, but PATA and SATA
6070 * controllers emulating a TF-based interface can have two: master
6071 * and slave.
6072 *
6073 * However, there are a few controllers which don't fit into this
6074 * abstraction too well - SATA controllers which emulate TF
6075 * interface with both master and slave devices but also have
6076 * separate SCR register sets for each device. These controllers
6077 * need separate links for physical link handling
6078 * (e.g. onlineness, link speed) but should be treated like a
6079 * traditional M/S controller for everything else (e.g. command
6080 * issue, softreset).
6081 *
6082 * slave_link is libata's way of handling this class of
6083 * controllers without impacting the core layer too much. For
6084 * anything other than physical link handling, the default host
6085 * link is used for both master and slave. For physical link
6086 * handling, the separate @ap->slave_link is used. All dirty details
6087 * are implemented inside the libata core layer. From the LLD's POV, the
6088 * only difference is that prereset, hardreset and postreset are
6089 * called once more for the slave link, so the reset sequence
6090 * looks like the following.
6091 *
6092 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
6093 * softreset(M) -> postreset(M) -> postreset(S)
6094 *
6095 * Note that softreset is called only for the master. Softreset
6096 * resets both M/S by definition, so SRST on master should handle
6097 * both (the standard method will work just fine).
6098 *
6099 * LOCKING:
6100 * Should be called before host is registered.
6101 *
6102 * RETURNS:
6103 * 0 on success, -errno on failure.
6104 */
6105 int ata_slave_link_init(struct ata_port *ap)
6106 {
6107 struct ata_link *link;
6108
6109 WARN_ON(ap->slave_link);
6110 WARN_ON(ap->flags & ATA_FLAG_PMP);
6111
6112 link = kzalloc(sizeof(*link), GFP_KERNEL);
6113 if (!link)
6114 return -ENOMEM;
6115
6116 ata_link_init(ap, link, 1);
6117 ap->slave_link = link;
6118 return 0;
6119 }
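
/*
 * Hedged usage sketch: an LLD for such a controller would typically
 * enable slave link handling on each port after allocation and before
 * ata_host_register(), e.g.
 *
 *	for (i = 0; i < host->n_ports; i++) {
 *		rc = ata_slave_link_init(host->ports[i]);
 *		if (rc)
 *			return rc;
 *	}
 */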
6120
6121 static void ata_host_stop(struct device *gendev, void *res)
6122 {
6123 struct ata_host *host = dev_get_drvdata(gendev);
6124 int i;
6125
6126 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6127
6128 for (i = 0; i < host->n_ports; i++) {
6129 struct ata_port *ap = host->ports[i];
6130
6131 if (ap->ops->port_stop)
6132 ap->ops->port_stop(ap);
6133 }
6134
6135 if (host->ops->host_stop)
6136 host->ops->host_stop(host);
6137 }
6138
6139 /**
6140 * ata_finalize_port_ops - finalize ata_port_operations
6141 * @ops: ata_port_operations to finalize
6142 *
6143 * An ata_port_operations can inherit from another ops and that
6144 * ops can again inherit from another. This can go on as many
6145 * times as necessary as long as there is no loop in the
6146 * inheritance chain.
6147 *
6148 * Ops tables are finalized when the host is started. NULL or
6149 * unspecified entries are inherited from the closest ancestor
6150 * which has the method, and the entry is populated with it.
6151 * After finalization, the ops table directly points to all the
6152 * methods and ->inherits is no longer necessary and cleared.
6153 *
6154 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
6155 *
6156 * LOCKING:
6157 * None.
6158 */
6159 static void ata_finalize_port_ops(struct ata_port_operations *ops)
6160 {
6161 static DEFINE_SPINLOCK(lock);
6162 const struct ata_port_operations *cur;
6163 void **begin = (void **)ops;
6164 void **end = (void **)&ops->inherits;
6165 void **pp;
6166
6167 if (!ops || !ops->inherits)
6168 return;
6169
6170 spin_lock(&lock);
6171
6172 for (cur = ops->inherits; cur; cur = cur->inherits) {
6173 void **inherit = (void **)cur;
6174
6175 for (pp = begin; pp < end; pp++, inherit++)
6176 if (!*pp)
6177 *pp = *inherit;
6178 }
6179
6180 for (pp = begin; pp < end; pp++)
6181 if (IS_ERR(*pp))
6182 *pp = NULL;
6183
6184 ops->inherits = NULL;
6185
6186 spin_unlock(&lock);
6187 }
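
/*
 * Illustrative sketch of the inheritance being resolved (my_ops and
 * my_hardreset are hypothetical):
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.softreset	= ATA_OP_NULL,
 *	};
 *
 * After finalization .hardreset keeps the override, .softreset is forced
 * to NULL via ATA_OP_NULL, every other method comes from the closest
 * ancestor that defines it, and .inherits is cleared.
 */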
6188
6189 /**
6190 * ata_host_start - start and freeze ports of an ATA host
6191 * @host: ATA host to start ports for
6192 *
6193 * Start and then freeze ports of @host. Started status is
6194 * recorded in host->flags, so this function can be called
6195 * multiple times. Ports are guaranteed to get started only
6196 * once. If host->ops isn't initialized yet, it's set to the
6197 * first non-dummy port ops.
6198 *
6199 * LOCKING:
6200 * Inherited from calling layer (may sleep).
6201 *
6202 * RETURNS:
6203 * 0 if all ports are started successfully, -errno otherwise.
6204 */
6205 int ata_host_start(struct ata_host *host)
6206 {
6207 int have_stop = 0;
6208 void *start_dr = NULL;
6209 int i, rc;
6210
6211 if (host->flags & ATA_HOST_STARTED)
6212 return 0;
6213
6214 ata_finalize_port_ops(host->ops);
6215
6216 for (i = 0; i < host->n_ports; i++) {
6217 struct ata_port *ap = host->ports[i];
6218
6219 ata_finalize_port_ops(ap->ops);
6220
6221 if (!host->ops && !ata_port_is_dummy(ap))
6222 host->ops = ap->ops;
6223
6224 if (ap->ops->port_stop)
6225 have_stop = 1;
6226 }
6227
6228 if (host->ops->host_stop)
6229 have_stop = 1;
6230
6231 if (have_stop) {
6232 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6233 if (!start_dr)
6234 return -ENOMEM;
6235 }
6236
6237 for (i = 0; i < host->n_ports; i++) {
6238 struct ata_port *ap = host->ports[i];
6239
6240 if (ap->ops->port_start) {
6241 rc = ap->ops->port_start(ap);
6242 if (rc) {
6243 if (rc != -ENODEV)
6244 dev_err(host->dev,
6245 "failed to start port %d (errno=%d)\n",
6246 i, rc);
6247 goto err_out;
6248 }
6249 }
6250 ata_eh_freeze_port(ap);
6251 }
6252
6253 if (start_dr)
6254 devres_add(host->dev, start_dr);
6255 host->flags |= ATA_HOST_STARTED;
6256 return 0;
6257
6258 err_out:
6259 while (--i >= 0) {
6260 struct ata_port *ap = host->ports[i];
6261
6262 if (ap->ops->port_stop)
6263 ap->ops->port_stop(ap);
6264 }
6265 devres_free(start_dr);
6266 return rc;
6267 }
6268
6269 /**
6270 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
6271 * @host: host to initialize
6272 * @dev: device host is attached to
6273 * @ops: port_ops
6274 *
6275 */
6276 void ata_host_init(struct ata_host *host, struct device *dev,
6277 struct ata_port_operations *ops)
6278 {
6279 spin_lock_init(&host->lock);
6280 mutex_init(&host->eh_mutex);
6281 host->n_tags = ATA_MAX_QUEUE - 1;
6282 host->dev = dev;
6283 host->ops = ops;
6284 }
6285
6286 void __ata_port_probe(struct ata_port *ap)
6287 {
6288 struct ata_eh_info *ehi = &ap->link.eh_info;
6289 unsigned long flags;
6290
6291 /* kick EH for boot probing */
6292 spin_lock_irqsave(ap->lock, flags);
6293
6294 ehi->probe_mask |= ATA_ALL_DEVICES;
6295 ehi->action |= ATA_EH_RESET;
6296 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6297
6298 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6299 ap->pflags |= ATA_PFLAG_LOADING;
6300 ata_port_schedule_eh(ap);
6301
6302 spin_unlock_irqrestore(ap->lock, flags);
6303 }
6304
6305 int ata_port_probe(struct ata_port *ap)
6306 {
6307 int rc = 0;
6308
6309 if (ap->ops->error_handler) {
6310 __ata_port_probe(ap);
6311 ata_port_wait_eh(ap);
6312 } else {
6313 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6314 rc = ata_bus_probe(ap);
6315 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6316 }
6317 return rc;
6318 }
6319
6320
6321 static void async_port_probe(void *data, async_cookie_t cookie)
6322 {
6323 struct ata_port *ap = data;
6324
6325 /*
6326 * If we're not allowed to scan this host in parallel,
6327 * we need to wait until all previous scans have completed
6328 * before going further.
6329 * Jeff Garzik says this is only within a controller, so we
6330 * don't need to wait for port 0, only for later ports.
6331 */
6332 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6333 async_synchronize_cookie(cookie);
6334
6335 (void)ata_port_probe(ap);
6336
6337 /* in order to keep device order, we need to synchronize at this point */
6338 async_synchronize_cookie(cookie);
6339
6340 ata_scsi_scan_host(ap, 1);
6341 }
6342
6343 /**
6344 * ata_host_register - register initialized ATA host
6345 * @host: ATA host to register
6346 * @sht: template for SCSI host
6347 *
6348 * Register initialized ATA host. @host is allocated using
6349 * ata_host_alloc() and fully initialized by LLD. This function
6350 * starts ports, registers @host with ATA and SCSI layers and
6351 * probes registered devices.
6352 *
6353 * LOCKING:
6354 * Inherited from calling layer (may sleep).
6355 *
6356 * RETURNS:
6357 * 0 on success, -errno otherwise.
6358 */
6359 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6360 {
6361 int i, rc;
6362
6363 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6364
6365 /* host must have been started */
6366 if (!(host->flags & ATA_HOST_STARTED)) {
6367 dev_err(host->dev, "BUG: trying to register unstarted host\n");
6368 WARN_ON(1);
6369 return -EINVAL;
6370 }
6371
6372 /* Blow away unused ports. This happens when the LLD can't
6373 * determine the exact number of ports to allocate at
6374 * allocation time.
6375 */
6376 for (i = host->n_ports; host->ports[i]; i++)
6377 kfree(host->ports[i]);
6378
6379 /* give ports names and add SCSI hosts */
6380 for (i = 0; i < host->n_ports; i++) {
6381 host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6382 host->ports[i]->local_port_no = i + 1;
6383 }
6384
6385 /* Create associated sysfs transport objects */
6386 for (i = 0; i < host->n_ports; i++) {
6387 rc = ata_tport_add(host->dev, host->ports[i]);
6388 if (rc)
6389 goto err_tadd;
6391 }
6392
6393 rc = ata_scsi_add_hosts(host, sht);
6394 if (rc)
6395 goto err_tadd;
6396
6397 /* set cable, sata_spd_limit and report */
6398 for (i = 0; i < host->n_ports; i++) {
6399 struct ata_port *ap = host->ports[i];
6400 unsigned long xfer_mask;
6401
6402 /* set SATA cable type if still unset */
6403 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6404 ap->cbl = ATA_CBL_SATA;
6405
6406 /* init sata_spd_limit to the current value */
6407 sata_link_init_spd(&ap->link);
6408 if (ap->slave_link)
6409 sata_link_init_spd(ap->slave_link);
6410
6411 /* print per-port info to dmesg */
6412 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6413 ap->udma_mask);
6414
6415 if (!ata_port_is_dummy(ap)) {
6416 ata_port_info(ap, "%cATA max %s %s\n",
6417 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6418 ata_mode_string(xfer_mask),
6419 ap->link.eh_info.desc);
6420 ata_ehi_clear_desc(&ap->link.eh_info);
6421 } else
6422 ata_port_info(ap, "DUMMY\n");
6423 }
6424
6425 /* perform each probe asynchronously */
6426 for (i = 0; i < host->n_ports; i++) {
6427 struct ata_port *ap = host->ports[i];
6428 async_schedule(async_port_probe, ap);
6429 }
6430
6431 return 0;
6432
6433 err_tadd:
6434 while (--i >= 0) {
6435 ata_tport_delete(host->ports[i]);
6436 }
6437 return rc;
6439 }
6440
6441 /**
6442 * ata_host_activate - start host, request IRQ and register it
6443 * @host: target ATA host
6444 * @irq: IRQ to request
6445 * @irq_handler: irq_handler used when requesting IRQ
6446 * @irq_flags: irq_flags used when requesting IRQ
6447 * @sht: scsi_host_template to use when registering the host
6448 *
6449 * After allocating an ATA host and initializing it, most libata
6450 * LLDs perform three steps to activate the host - start host,
6451 * request IRQ and register it. This helper takes necessary
6452 * arguments and performs the three steps in one go.
6453 *
6454 * An invalid IRQ skips the IRQ registration and expects the host to
6455 * have set polling mode on the port. In this case, @irq_handler
6456 * should be NULL.
6457 *
6458 * LOCKING:
6459 * Inherited from calling layer (may sleep).
6460 *
6461 * RETURNS:
6462 * 0 on success, -errno otherwise.
6463 */
6464 int ata_host_activate(struct ata_host *host, int irq,
6465 irq_handler_t irq_handler, unsigned long irq_flags,
6466 struct scsi_host_template *sht)
6467 {
6468 int i, rc;
6469 char *irq_desc;
6470
6471 rc = ata_host_start(host);
6472 if (rc)
6473 return rc;
6474
6475 /* Special case for polling mode */
6476 if (!irq) {
6477 WARN_ON(irq_handler);
6478 return ata_host_register(host, sht);
6479 }
6480
6481 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6482 dev_driver_string(host->dev),
6483 dev_name(host->dev));
6484 if (!irq_desc)
6485 return -ENOMEM;
6486
6487 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6488 irq_desc, host);
6489 if (rc)
6490 return rc;
6491
6492 for (i = 0; i < host->n_ports; i++)
6493 ata_port_desc(host->ports[i], "irq %d", irq);
6494
6495 rc = ata_host_register(host, sht);
6496 /* if failed, just free the IRQ and leave ports alone */
6497 if (rc)
6498 devm_free_irq(host->dev, irq, host);
6499
6500 return rc;
6501 }
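
/*
 * Hedged sketch of a typical LLD probe path using this helper
 * (my_interrupt and my_sht are hypothetical driver objects):
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	... map resources and fill host->ports[i]->ioaddr ...
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */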
6502
6503 /**
6504 * ata_port_detach - Detach ATA port in preparation of device removal
6505 * @ap: ATA port to be detached
6506 *
6507 * Detach all ATA devices and the associated SCSI devices of @ap;
6508 * then, remove the associated SCSI host. @ap is guaranteed to
6509 * be quiescent on return from this function.
6510 *
6511 * LOCKING:
6512 * Kernel thread context (may sleep).
6513 */
6514 static void ata_port_detach(struct ata_port *ap)
6515 {
6516 unsigned long flags;
6517 struct ata_link *link;
6518 struct ata_device *dev;
6519
6520 if (!ap->ops->error_handler)
6521 goto skip_eh;
6522
6523 /* tell EH we're leaving & flush EH */
6524 spin_lock_irqsave(ap->lock, flags);
6525 ap->pflags |= ATA_PFLAG_UNLOADING;
6526 ata_port_schedule_eh(ap);
6527 spin_unlock_irqrestore(ap->lock, flags);
6528
6529 /* wait till EH commits suicide */
6530 ata_port_wait_eh(ap);
6531
6532 /* it better be dead now */
6533 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6534
6535 cancel_delayed_work_sync(&ap->hotplug_task);
6536
6537 skip_eh:
6538 /* clean up zpodd on port removal */
6539 ata_for_each_link(link, ap, HOST_FIRST) {
6540 ata_for_each_dev(dev, link, ALL) {
6541 if (zpodd_dev_enabled(dev))
6542 zpodd_exit(dev);
6543 }
6544 }
6545 if (ap->pmp_link) {
6546 int i;
6547 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6548 ata_tlink_delete(&ap->pmp_link[i]);
6549 }
6550 /* remove the associated SCSI host */
6551 scsi_remove_host(ap->scsi_host);
6552 ata_tport_delete(ap);
6553 }
6554
6555 /**
6556 * ata_host_detach - Detach all ports of an ATA host
6557 * @host: Host to detach
6558 *
6559 * Detach all ports of @host.
6560 *
6561 * LOCKING:
6562 * Kernel thread context (may sleep).
6563 */
6564 void ata_host_detach(struct ata_host *host)
6565 {
6566 int i;
6567
6568 for (i = 0; i < host->n_ports; i++)
6569 ata_port_detach(host->ports[i]);
6570
6571 /* the host is dead now, dissociate ACPI */
6572 ata_acpi_dissociate(host);
6573 }
6574
6575 #ifdef CONFIG_PCI
6576
6577 /**
6578 * ata_pci_remove_one - PCI layer callback for device removal
6579 * @pdev: PCI device that was removed
6580 *
6581 * PCI layer indicates to libata via this hook that hot-unplug or
6582 * module unload event has occurred. Detach all ports. Resource
6583 * release is handled via devres.
6584 *
6585 * LOCKING:
6586 * Inherited from PCI layer (may sleep).
6587 */
6588 void ata_pci_remove_one(struct pci_dev *pdev)
6589 {
6590 struct ata_host *host = pci_get_drvdata(pdev);
6591
6592 ata_host_detach(host);
6593 }
6594
6595 /* move to PCI subsystem */
6596 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6597 {
6598 unsigned long tmp = 0;
6599
6600 switch (bits->width) {
6601 case 1: {
6602 u8 tmp8 = 0;
6603 pci_read_config_byte(pdev, bits->reg, &tmp8);
6604 tmp = tmp8;
6605 break;
6606 }
6607 case 2: {
6608 u16 tmp16 = 0;
6609 pci_read_config_word(pdev, bits->reg, &tmp16);
6610 tmp = tmp16;
6611 break;
6612 }
6613 case 4: {
6614 u32 tmp32 = 0;
6615 pci_read_config_dword(pdev, bits->reg, &tmp32);
6616 tmp = tmp32;
6617 break;
6618 }
6619
6620 default:
6621 return -EINVAL;
6622 }
6623
6624 tmp &= bits->mask;
6625
6626 return (tmp == bits->val) ? 1 : 0;
6627 }
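
/*
 * Usage sketch (register offset and bits are hypothetical): test whether
 * an enable bit is set in PCI config space.
 *
 *	static const struct pci_bits port_enable_bits = {
 *		.reg	= 0x41,
 *		.width	= 1,
 *		.mask	= 0x80,
 *		.val	= 0x80,
 *	};
 *
 *	if (pci_test_config_bits(pdev, &port_enable_bits) <= 0)
 *		return -ENODEV;
 */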
6628
6629 #ifdef CONFIG_PM
6630 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6631 {
6632 pci_save_state(pdev);
6633 pci_disable_device(pdev);
6634
6635 if (mesg.event & PM_EVENT_SLEEP)
6636 pci_set_power_state(pdev, PCI_D3hot);
6637 }
6638
6639 int ata_pci_device_do_resume(struct pci_dev *pdev)
6640 {
6641 int rc;
6642
6643 pci_set_power_state(pdev, PCI_D0);
6644 pci_restore_state(pdev);
6645
6646 rc = pcim_enable_device(pdev);
6647 if (rc) {
6648 dev_err(&pdev->dev,
6649 "failed to enable device after resume (%d)\n", rc);
6650 return rc;
6651 }
6652
6653 pci_set_master(pdev);
6654 return 0;
6655 }
6656
6657 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6658 {
6659 struct ata_host *host = pci_get_drvdata(pdev);
6660 int rc = 0;
6661
6662 rc = ata_host_suspend(host, mesg);
6663 if (rc)
6664 return rc;
6665
6666 ata_pci_device_do_suspend(pdev, mesg);
6667
6668 return 0;
6669 }
6670
6671 int ata_pci_device_resume(struct pci_dev *pdev)
6672 {
6673 struct ata_host *host = pci_get_drvdata(pdev);
6674 int rc;
6675
6676 rc = ata_pci_device_do_resume(pdev);
6677 if (rc == 0)
6678 ata_host_resume(host);
6679 return rc;
6680 }
6681 #endif /* CONFIG_PM */
6682
6683 #endif /* CONFIG_PCI */
6684
6685 /**
6686 * ata_platform_remove_one - Platform layer callback for device removal
6687 * @pdev: Platform device that was removed
6688 *
6689 * Platform layer indicates to libata via this hook that hot-unplug or
6690 * module unload event has occurred. Detach all ports. Resource
6691 * release is handled via devres.
6692 *
6693 * LOCKING:
6694 * Inherited from platform layer (may sleep).
6695 */
6696 int ata_platform_remove_one(struct platform_device *pdev)
6697 {
6698 struct ata_host *host = platform_get_drvdata(pdev);
6699
6700 ata_host_detach(host);
6701
6702 return 0;
6703 }
6704
6705 static int __init ata_parse_force_one(char **cur,
6706 struct ata_force_ent *force_ent,
6707 const char **reason)
6708 {
6709 static const struct ata_force_param force_tbl[] __initconst = {
6710 { "40c", .cbl = ATA_CBL_PATA40 },
6711 { "80c", .cbl = ATA_CBL_PATA80 },
6712 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6713 { "unk", .cbl = ATA_CBL_PATA_UNK },
6714 { "ign", .cbl = ATA_CBL_PATA_IGN },
6715 { "sata", .cbl = ATA_CBL_SATA },
6716 { "1.5Gbps", .spd_limit = 1 },
6717 { "3.0Gbps", .spd_limit = 2 },
6718 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6719 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6720 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
6721 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
6722 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6723 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6724 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6725 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6726 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6727 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6728 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6729 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6730 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6731 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6732 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6733 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6734 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6735 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6736 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6737 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6738 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6739 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6740 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6741 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6742 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6743 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6744 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6745 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6746 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6747 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6748 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6749 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6750 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6751 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6752 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6753 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6754 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6755 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6756 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6757 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6758 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6759 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6760 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6761 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6762 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6763 };
6764 char *start = *cur, *p = *cur;
6765 char *id, *val, *endp;
6766 const struct ata_force_param *match_fp = NULL;
6767 int nr_matches = 0, i;
6768
6769 /* find where this param ends and update *cur */
6770 while (*p != '\0' && *p != ',')
6771 p++;
6772
6773 if (*p == '\0')
6774 *cur = p;
6775 else
6776 *cur = p + 1;
6777
6778 *p = '\0';
6779
6780 /* parse */
6781 p = strchr(start, ':');
6782 if (!p) {
6783 val = strstrip(start);
6784 goto parse_val;
6785 }
6786 *p = '\0';
6787
6788 id = strstrip(start);
6789 val = strstrip(p + 1);
6790
6791 /* parse id */
6792 p = strchr(id, '.');
6793 if (p) {
6794 *p++ = '\0';
6795 force_ent->device = simple_strtoul(p, &endp, 10);
6796 if (p == endp || *endp != '\0') {
6797 *reason = "invalid device";
6798 return -EINVAL;
6799 }
6800 }
6801
6802 force_ent->port = simple_strtoul(id, &endp, 10);
6803 if (id == endp || *endp != '\0') {
6804 *reason = "invalid port/link";
6805 return -EINVAL;
6806 }
6807
6808 parse_val:
6809 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6810 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6811 const struct ata_force_param *fp = &force_tbl[i];
6812
6813 if (strncasecmp(val, fp->name, strlen(val)))
6814 continue;
6815
6816 nr_matches++;
6817 match_fp = fp;
6818
6819 if (strcasecmp(val, fp->name) == 0) {
6820 nr_matches = 1;
6821 break;
6822 }
6823 }
6824
6825 if (!nr_matches) {
6826 *reason = "unknown value";
6827 return -EINVAL;
6828 }
6829 if (nr_matches > 1) {
6830 *reason = "ambigious value";
6831 return -EINVAL;
6832 }
6833
6834 force_ent->param = *match_fp;
6835
6836 return 0;
6837 }
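
/*
 * Hedged example of the syntax parsed above: each comma-separated entry
 * is [PORT[.DEVICE]:]VALUE, e.g.
 *
 *	libata.force=1:noncq,2.00:udma/33,3.0Gbps
 *
 * "1:noncq" disables NCQ on port 1, "2.00:udma/33" limits device 0 on
 * port 2 to UDMA/33, and the ID-less "3.0Gbps" inherits the port/device
 * of the preceding entry (see ata_parse_force_param() below).
 */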
6838
6839 static void __init ata_parse_force_param(void)
6840 {
6841 int idx = 0, size = 1;
6842 int last_port = -1, last_device = -1;
6843 char *p, *cur, *next;
6844
6845 /* calculate maximum number of params and allocate force_tbl */
6846 for (p = ata_force_param_buf; *p; p++)
6847 if (*p == ',')
6848 size++;
6849
6850 ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6851 if (!ata_force_tbl) {
6852 printk(KERN_WARNING "ata: failed to extend force table, "
6853 "libata.force ignored\n");
6854 return;
6855 }
6856
6857 /* parse and populate the table */
6858 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6859 const char *reason = "";
6860 struct ata_force_ent te = { .port = -1, .device = -1 };
6861
6862 next = cur;
6863 if (ata_parse_force_one(&next, &te, &reason)) {
6864 printk(KERN_WARNING "ata: failed to parse force "
6865 "parameter \"%s\" (%s)\n",
6866 cur, reason);
6867 continue;
6868 }
6869
6870 if (te.port == -1) {
6871 te.port = last_port;
6872 te.device = last_device;
6873 }
6874
6875 ata_force_tbl[idx++] = te;
6876
6877 last_port = te.port;
6878 last_device = te.device;
6879 }
6880
6881 ata_force_tbl_size = idx;
6882 }
6883
6884 static int __init ata_init(void)
6885 {
6886 int rc;
6887
6888 ata_parse_force_param();
6889
6890 rc = ata_sff_init();
6891 if (rc) {
6892 kfree(ata_force_tbl);
6893 return rc;
6894 }
6895
6896 libata_transport_init();
6897 ata_scsi_transport_template = ata_attach_transport();
6898 if (!ata_scsi_transport_template) {
6899 ata_sff_exit();
6900 rc = -ENOMEM;
6901 goto err_out;
6902 }
6903
6904 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6905 return 0;
6906
6907 err_out:
6908 return rc;
6909 }
6910
6911 static void __exit ata_exit(void)
6912 {
6913 ata_release_transport(ata_scsi_transport_template);
6914 libata_transport_exit();
6915 ata_sff_exit();
6916 kfree(ata_force_tbl);
6917 }
6918
6919 subsys_initcall(ata_init);
6920 module_exit(ata_exit);
6921
6922 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6923
6924 int ata_ratelimit(void)
6925 {
6926 return __ratelimit(&ratelimit);
6927 }
6928
6929 /**
6930 * ata_msleep - ATA EH owner aware msleep
6931 * @ap: ATA port to attribute the sleep to
6932 * @msecs: duration to sleep in milliseconds
6933 *
6934 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6935 * ownership is released before going to sleep and reacquired
6936 * after the sleep is complete. IOW, other ports sharing the
6937 * @ap->host will be allowed to own the EH while this task is
6938 * sleeping.
6939 *
6940 * LOCKING:
6941 * Might sleep.
6942 */
6943 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6944 {
6945 bool owns_eh = ap && ap->host->eh_owner == current;
6946
6947 if (owns_eh)
6948 ata_eh_release(ap);
6949
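/* for short sleeps msleep() may oversleep badly; use usleep_range() */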
6950 if (msecs < 20) {
6951 unsigned long usecs = msecs * USEC_PER_MSEC;
6952 usleep_range(usecs, usecs + 50);
6953 } else {
6954 msleep(msecs);
6955 }
6956
6957 if (owns_eh)
6958 ata_eh_acquire(ap);
6959 }
6960
6961 /**
6962 * ata_wait_register - wait until register value changes
6963 * @ap: ATA port to wait register for, can be NULL
6964 * @reg: IO-mapped register
6965 * @mask: Mask to apply to read register value
6966 * @val: Wait condition
6967 * @interval: polling interval in milliseconds
6968 * @timeout: timeout in milliseconds
6969 *
6970 * Waiting for some bits of a register to change is a common
6971 * operation for ATA controllers. This function reads the 32-bit LE
6972 * IO-mapped register @reg and tests for the following condition.
6973 *
6974 * (*@reg & @mask) != @val
6975 *
6976 * If the condition is met, it returns; otherwise, the process is
6977 * repeated every @interval milliseconds until @timeout expires.
6978 *
6979 * LOCKING:
6980 * Kernel thread context (may sleep)
6981 *
6982 * RETURNS:
6983 * The final register value.
6984 */
6985 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6986 unsigned long interval, unsigned long timeout)
6987 {
6988 unsigned long deadline;
6989 u32 tmp;
6990
6991 tmp = ioread32(reg);
6992
6993 /* Calculate timeout _after_ the first read to make sure
6994 * preceding writes reach the controller before starting to
6995 * eat away the timeout.
6996 */
6997 deadline = ata_deadline(jiffies, timeout);
6998
6999 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
7000 ata_msleep(ap, interval);
7001 tmp = ioread32(reg);
7002 }
7003
7004 return tmp;
7005 }
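
/*
 * Usage sketch (register and bit are hypothetical): wait up to 1s for a
 * busy bit to clear, polling every 10ms.
 *
 *	status = ata_wait_register(ap, mmio + MY_STATUS, MY_BUSY, MY_BUSY,
 *				   10, 1000);
 *	if (status & MY_BUSY)
 *		return -EBUSY;
 */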
7006
7007 /**
7008 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
7009 * @link: Link receiving the event
7010 *
7011 * Test whether the received PHY event has to be ignored or not.
7012 *
7013 * LOCKING:
7014 * None.
7015 *
7016 * RETURNS:
7017 * True if the event has to be ignored.
7018 */
7019 bool sata_lpm_ignore_phy_events(struct ata_link *link)
7020 {
7021 unsigned long lpm_timeout = link->last_lpm_change +
7022 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
7023
7024 /* if LPM is enabled, PHYRDY doesn't mean anything */
7025 if (link->lpm_policy > ATA_LPM_MAX_POWER)
7026 return true;
7027
7028 /* ignore the first PHY event after the LPM policy changed
7029 * as it might be spurious
7030 */
7031 if ((link->flags & ATA_LFLAG_CHANGED) &&
7032 time_before(jiffies, lpm_timeout))
7033 return true;
7034
7035 return false;
7036 }
7037 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
7038
7039 /*
7040 * Dummy port_ops
7041 */
7042 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7043 {
7044 return AC_ERR_SYSTEM;
7045 }
7046
7047 static void ata_dummy_error_handler(struct ata_port *ap)
7048 {
7049 /* truly dummy */
7050 }
7051
7052 struct ata_port_operations ata_dummy_port_ops = {
7053 .qc_prep = ata_noop_qc_prep,
7054 .qc_issue = ata_dummy_qc_issue,
7055 .error_handler = ata_dummy_error_handler,
7056 .sched_eh = ata_std_sched_eh,
7057 .end_eh = ata_std_end_eh,
7058 };
7059
7060 const struct ata_port_info ata_dummy_port_info = {
7061 .port_ops = &ata_dummy_port_ops,
7062 };
7063
7064 /*
7065 * Utility print functions
7066 */
7067 void ata_port_printk(const struct ata_port *ap, const char *level,
7068 const char *fmt, ...)
7069 {
7070 struct va_format vaf;
7071 va_list args;
7072
7073 va_start(args, fmt);
7074
7075 vaf.fmt = fmt;
7076 vaf.va = &args;
7077
7078 printk("%sata%u: %pV", level, ap->print_id, &vaf);
7079
7080 va_end(args);
7081 }
7082 EXPORT_SYMBOL(ata_port_printk);
7083
7084 void ata_link_printk(const struct ata_link *link, const char *level,
7085 const char *fmt, ...)
7086 {
7087 struct va_format vaf;
7088 va_list args;
7089
7090 va_start(args, fmt);
7091
7092 vaf.fmt = fmt;
7093 vaf.va = &args;
7094
7095 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
7096 printk("%sata%u.%02u: %pV",
7097 level, link->ap->print_id, link->pmp, &vaf);
7098 else
7099 printk("%sata%u: %pV",
7100 level, link->ap->print_id, &vaf);
7101
7102 va_end(args);
7103 }
7104 EXPORT_SYMBOL(ata_link_printk);
7105
7106 void ata_dev_printk(const struct ata_device *dev, const char *level,
7107 const char *fmt, ...)
7108 {
7109 struct va_format vaf;
7110 va_list args;
7111
7112 va_start(args, fmt);
7113
7114 vaf.fmt = fmt;
7115 vaf.va = &args;
7116
7117 printk("%sata%u.%02u: %pV",
7118 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
7119 &vaf);
7120
7121 va_end(args);
7122 }
7123 EXPORT_SYMBOL(ata_dev_printk);
7124
7125 void ata_print_version(const struct device *dev, const char *version)
7126 {
7127 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
7128 }
7129 EXPORT_SYMBOL(ata_print_version);
7130
7131 /*
7132 * libata is essentially a library of internal helper functions for
7133 * low-level ATA host controller drivers. As such, the API/ABI is
7134 * likely to change as new drivers are added and updated.
7135 * Do not depend on ABI/API stability.
7136 */
7137 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7138 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7139 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7140 EXPORT_SYMBOL_GPL(ata_base_port_ops);
7141 EXPORT_SYMBOL_GPL(sata_port_ops);
7142 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7143 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7144 EXPORT_SYMBOL_GPL(ata_link_next);
7145 EXPORT_SYMBOL_GPL(ata_dev_next);
7146 EXPORT_SYMBOL_GPL(ata_std_bios_param);
7147 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
7148 EXPORT_SYMBOL_GPL(ata_host_init);
7149 EXPORT_SYMBOL_GPL(ata_host_alloc);
7150 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7151 EXPORT_SYMBOL_GPL(ata_slave_link_init);
7152 EXPORT_SYMBOL_GPL(ata_host_start);
7153 EXPORT_SYMBOL_GPL(ata_host_register);
7154 EXPORT_SYMBOL_GPL(ata_host_activate);
7155 EXPORT_SYMBOL_GPL(ata_host_detach);
7156 EXPORT_SYMBOL_GPL(ata_sg_init);
7157 EXPORT_SYMBOL_GPL(ata_qc_complete);
7158 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7159 EXPORT_SYMBOL_GPL(atapi_cmd_type);
7160 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7161 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7162 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7163 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7164 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7165 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7166 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7167 EXPORT_SYMBOL_GPL(ata_mode_string);
7168 EXPORT_SYMBOL_GPL(ata_id_xfermask);
7169 EXPORT_SYMBOL_GPL(ata_do_set_mode);
7170 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7171 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7172 EXPORT_SYMBOL_GPL(ata_dev_disable);
7173 EXPORT_SYMBOL_GPL(sata_set_spd);
7174 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7175 EXPORT_SYMBOL_GPL(sata_link_debounce);
7176 EXPORT_SYMBOL_GPL(sata_link_resume);
7177 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
7178 EXPORT_SYMBOL_GPL(ata_std_prereset);
7179 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7180 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7181 EXPORT_SYMBOL_GPL(ata_std_postreset);
7182 EXPORT_SYMBOL_GPL(ata_dev_classify);
7183 EXPORT_SYMBOL_GPL(ata_dev_pair);
7184 EXPORT_SYMBOL_GPL(ata_ratelimit);
7185 EXPORT_SYMBOL_GPL(ata_msleep);
7186 EXPORT_SYMBOL_GPL(ata_wait_register);
7187 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7188 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7189 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7190 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7191 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
7192 EXPORT_SYMBOL_GPL(sata_scr_valid);
7193 EXPORT_SYMBOL_GPL(sata_scr_read);
7194 EXPORT_SYMBOL_GPL(sata_scr_write);
7195 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7196 EXPORT_SYMBOL_GPL(ata_link_online);
7197 EXPORT_SYMBOL_GPL(ata_link_offline);
7198 #ifdef CONFIG_PM
7199 EXPORT_SYMBOL_GPL(ata_host_suspend);
7200 EXPORT_SYMBOL_GPL(ata_host_resume);
7201 #endif /* CONFIG_PM */
7202 EXPORT_SYMBOL_GPL(ata_id_string);
7203 EXPORT_SYMBOL_GPL(ata_id_c_string);
7204 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
7205 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7206
7207 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7208 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7209 EXPORT_SYMBOL_GPL(ata_timing_compute);
7210 EXPORT_SYMBOL_GPL(ata_timing_merge);
7211 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7212
7213 #ifdef CONFIG_PCI
7214 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7215 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7216 #ifdef CONFIG_PM
7217 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7218 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7219 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7220 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7221 #endif /* CONFIG_PM */
7222 #endif /* CONFIG_PCI */
7223
7224 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
7225
7226 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7227 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7228 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7229 EXPORT_SYMBOL_GPL(ata_port_desc);
7230 #ifdef CONFIG_PCI
7231 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7232 #endif /* CONFIG_PCI */
7233 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7234 EXPORT_SYMBOL_GPL(ata_link_abort);
7235 EXPORT_SYMBOL_GPL(ata_port_abort);
7236 EXPORT_SYMBOL_GPL(ata_port_freeze);
7237 EXPORT_SYMBOL_GPL(sata_async_notification);
7238 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7239 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7240 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7241 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7242 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
7243 EXPORT_SYMBOL_GPL(ata_do_eh);
7244 EXPORT_SYMBOL_GPL(ata_std_error_handler);
7245
7246 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7247 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7248 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7249 EXPORT_SYMBOL_GPL(ata_cable_ignore);
7250 EXPORT_SYMBOL_GPL(ata_cable_sata);