1 /*
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
41 */
42
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
48 #include <linux/mm.h>
49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h>
51 #include <linux/delay.h>
52 #include <linux/timer.h>
53 #include <linux/interrupt.h>
54 #include <linux/completion.h>
55 #include <linux/suspend.h>
56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h>
58 #include <linux/io.h>
59 #include <linux/async.h>
60 #include <linux/log2.h>
61 #include <linux/slab.h>
62 #include <linux/glob.h>
63 #include <scsi/scsi.h>
64 #include <scsi/scsi_cmnd.h>
65 #include <scsi/scsi_host.h>
66 #include <linux/libata.h>
67 #include <asm/byteorder.h>
68 #include <linux/cdrom.h>
69 #include <linux/ratelimit.h>
70 #include <linux/pm_runtime.h>
71 #include <linux/platform_device.h>
72
73 #include "libata.h"
74 #include "libata-transport.h"
75
76 /* debounce timing parameters in msecs { interval, duration, timeout } */
77 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
78 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
79 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
80
81 const struct ata_port_operations ata_base_port_ops = {
82 .prereset = ata_std_prereset,
83 .postreset = ata_std_postreset,
84 .error_handler = ata_std_error_handler,
85 .sched_eh = ata_std_sched_eh,
86 .end_eh = ata_std_end_eh,
87 };
88
89 const struct ata_port_operations sata_port_ops = {
90 .inherits = &ata_base_port_ops,
91
92 .qc_defer = ata_std_qc_defer,
93 .hardreset = sata_std_hardreset,
94 };
95
96 static unsigned int ata_dev_init_params(struct ata_device *dev,
97 u16 heads, u16 sectors);
98 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
99 static void ata_dev_xfermask(struct ata_device *dev);
100 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
101
102 atomic_t ata_print_id = ATOMIC_INIT(0);
103
104 struct ata_force_param {
105 const char *name;
106 unsigned int cbl;
107 int spd_limit;
108 unsigned long xfer_mask;
109 unsigned int horkage_on;
110 unsigned int horkage_off;
111 unsigned int lflags;
112 };
113
114 struct ata_force_ent {
115 int port;
116 int device;
117 struct ata_force_param param;
118 };
119
120 static struct ata_force_ent *ata_force_tbl;
121 static int ata_force_tbl_size;
122
123 static char ata_force_param_buf[PAGE_SIZE] __initdata;
124 /* param_buf is thrown away after initialization, disallow read */
125 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
126 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
127
128 static int atapi_enabled = 1;
129 module_param(atapi_enabled, int, 0444);
130 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
131
132 static int atapi_dmadir = 0;
133 module_param(atapi_dmadir, int, 0444);
134 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
135
136 int atapi_passthru16 = 1;
137 module_param(atapi_passthru16, int, 0444);
138 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
139
140 int libata_fua = 0;
141 module_param_named(fua, libata_fua, int, 0444);
142 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
143
144 static int ata_ignore_hpa;
145 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
146 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
147
148 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
149 module_param_named(dma, libata_dma_mask, int, 0444);
150 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
151
152 static int ata_probe_timeout;
153 module_param(ata_probe_timeout, int, 0444);
154 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
155
156 int libata_noacpi = 0;
157 module_param_named(noacpi, libata_noacpi, int, 0444);
158 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
159
160 int libata_allow_tpm = 0;
161 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
162 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
163
164 static int atapi_an;
165 module_param(atapi_an, int, 0444);
165 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
167
168 MODULE_AUTHOR("Jeff Garzik");
169 MODULE_DESCRIPTION("Library module for ATA devices");
170 MODULE_LICENSE("GPL");
171 MODULE_VERSION(DRV_VERSION);
172
173
174 static bool ata_sstatus_online(u32 sstatus)
175 {
176 return (sstatus & 0xf) == 0x3;
177 }
178
179 /**
180 * ata_link_next - link iteration helper
181 * @link: the previous link, NULL to start
182 * @ap: ATA port containing links to iterate
183 * @mode: iteration mode, one of ATA_LITER_*
184 *
185 * LOCKING:
186 * Host lock or EH context.
187 *
188 * RETURNS:
189 * Pointer to the next link.
190 */
191 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
192 enum ata_link_iter_mode mode)
193 {
194 BUG_ON(mode != ATA_LITER_EDGE &&
195 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
196
197 /* NULL link indicates start of iteration */
198 if (!link)
199 switch (mode) {
200 case ATA_LITER_EDGE:
201 case ATA_LITER_PMP_FIRST:
202 if (sata_pmp_attached(ap))
203 return ap->pmp_link;
204 /* fall through */
205 case ATA_LITER_HOST_FIRST:
206 return &ap->link;
207 }
208
209 /* we just iterated over the host link, what's next? */
210 if (link == &ap->link)
211 switch (mode) {
212 case ATA_LITER_HOST_FIRST:
213 if (sata_pmp_attached(ap))
214 return ap->pmp_link;
215 /* fall through */
216 case ATA_LITER_PMP_FIRST:
217 if (unlikely(ap->slave_link))
218 return ap->slave_link;
219 /* fall through */
220 case ATA_LITER_EDGE:
221 return NULL;
222 }
223
224 /* slave_link excludes PMP */
225 if (unlikely(link == ap->slave_link))
226 return NULL;
227
228 /* we were over a PMP link */
229 if (++link < ap->pmp_link + ap->nr_pmp_links)
230 return link;
231
232 if (mode == ATA_LITER_PMP_FIRST)
233 return &ap->link;
234
235 return NULL;
236 }
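/*
 * Usage sketch (editor's illustration, not part of the driver): walking all
 * links on a port with ata_link_next().  Callers normally go through the
 * ata_for_each_link() wrapper instead; assuming an already initialized
 * struct ata_port *ap:
 *
 *	struct ata_link *link = NULL;
 *
 *	while ((link = ata_link_next(link, ap, ATA_LITER_EDGE)))
 *		ata_link_info(link, "visiting link, pmp=%d\n", link->pmp);
 */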
237
238 /**
239 * ata_dev_next - device iteration helper
240 * @dev: the previous device, NULL to start
241 * @link: ATA link containing devices to iterate
242 * @mode: iteration mode, one of ATA_DITER_*
243 *
244 * LOCKING:
245 * Host lock or EH context.
246 *
247 * RETURNS:
248 * Pointer to the next device.
249 */
250 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
251 enum ata_dev_iter_mode mode)
252 {
253 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
254 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
255
256 /* NULL dev indicates start of iteration */
257 if (!dev)
258 switch (mode) {
259 case ATA_DITER_ENABLED:
260 case ATA_DITER_ALL:
261 dev = link->device;
262 goto check;
263 case ATA_DITER_ENABLED_REVERSE:
264 case ATA_DITER_ALL_REVERSE:
265 dev = link->device + ata_link_max_devices(link) - 1;
266 goto check;
267 }
268
269 next:
270 /* move to the next one */
271 switch (mode) {
272 case ATA_DITER_ENABLED:
273 case ATA_DITER_ALL:
274 if (++dev < link->device + ata_link_max_devices(link))
275 goto check;
276 return NULL;
277 case ATA_DITER_ENABLED_REVERSE:
278 case ATA_DITER_ALL_REVERSE:
279 if (--dev >= link->device)
280 goto check;
281 return NULL;
282 }
283
284 check:
285 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
286 !ata_dev_enabled(dev))
287 goto next;
288 return dev;
289 }
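/*
 * Usage sketch (editor's illustration): iterating every enabled device on a
 * link, typically done via the ata_for_each_dev() wrapper.  Assuming an
 * already initialized struct ata_link *link:
 *
 *	struct ata_device *dev = NULL;
 *
 *	while ((dev = ata_dev_next(dev, link, ATA_DITER_ENABLED)))
 *		ata_dev_info(dev, "device %d enabled\n", dev->devno);
 */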
290
291 /**
292 * ata_dev_phys_link - find physical link for a device
293 * @dev: ATA device to look up physical link for
294 *
295 * Look up physical link which @dev is attached to. Note that
296 * this is different from @dev->link only when @dev is on slave
297 * link. For all other cases, it's the same as @dev->link.
298 *
299 * LOCKING:
300 * Don't care.
301 *
302 * RETURNS:
303 * Pointer to the found physical link.
304 */
305 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
306 {
307 struct ata_port *ap = dev->link->ap;
308
309 if (!ap->slave_link)
310 return dev->link;
311 if (!dev->devno)
312 return &ap->link;
313 return ap->slave_link;
314 }
315
316 /**
317 * ata_force_cbl - force cable type according to libata.force
318 * @ap: ATA port of interest
319 *
320 * Force cable type according to libata.force and whine about it.
321 * The last entry which has matching port number is used, so it
322 * can be specified as part of device force parameters. For
323 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
324 * same effect.
325 *
326 * LOCKING:
327 * EH context.
328 */
329 void ata_force_cbl(struct ata_port *ap)
330 {
331 int i;
332
333 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
334 const struct ata_force_ent *fe = &ata_force_tbl[i];
335
336 if (fe->port != -1 && fe->port != ap->print_id)
337 continue;
338
339 if (fe->param.cbl == ATA_CBL_NONE)
340 continue;
341
342 ap->cbl = fe->param.cbl;
343 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
344 return;
345 }
346 }
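/*
 * Example (editor's illustration) of the libata.force syntax consumed by
 * this and the following helpers, given on the kernel command line; the
 * entries follow the port[.device]:value form described in
 * Documentation/kernel-parameters.txt:
 *
 *	libata.force=1:40c               force a 40-wire cable on port 1
 *	libata.force=1.00:udma4          limit device 0 on port 1 to UDMA/66
 *	libata.force=1:40c,1.00:noncq    combine several entries with commas
 */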
347
348 /**
349 * ata_force_link_limits - force link limits according to libata.force
350 * @link: ATA link of interest
351 *
352 * Force link flags and SATA spd limit according to libata.force
353 * and whine about it. When only the port part is specified
354 * (e.g. 1:), the limit applies to all links connected to both
355 * the host link and all fan-out ports connected via PMP. If the
356 * device part is specified as 0 (e.g. 1.00:), it specifies the
357 * first fan-out link not the host link. Device number 15 always
358 * points to the host link whether PMP is attached or not. If the
359 * controller has slave link, device number 16 points to it.
360 *
361 * LOCKING:
362 * EH context.
363 */
364 static void ata_force_link_limits(struct ata_link *link)
365 {
366 bool did_spd = false;
367 int linkno = link->pmp;
368 int i;
369
370 if (ata_is_host_link(link))
371 linkno += 15;
372
373 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
374 const struct ata_force_ent *fe = &ata_force_tbl[i];
375
376 if (fe->port != -1 && fe->port != link->ap->print_id)
377 continue;
378
379 if (fe->device != -1 && fe->device != linkno)
380 continue;
381
382 /* only honor the first spd limit */
383 if (!did_spd && fe->param.spd_limit) {
384 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
385 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
386 fe->param.name);
387 did_spd = true;
388 }
389
390 /* let lflags stack */
391 if (fe->param.lflags) {
392 link->flags |= fe->param.lflags;
393 ata_link_notice(link,
394 "FORCE: link flag 0x%x forced -> 0x%x\n",
395 fe->param.lflags, link->flags);
396 }
397 }
398 }
399
400 /**
401 * ata_force_xfermask - force xfermask according to libata.force
402 * @dev: ATA device of interest
403 *
404 * Force xfer_mask according to libata.force and whine about it.
405 * For consistency with link selection, device number 15 selects
406 * the first device connected to the host link.
407 *
408 * LOCKING:
409 * EH context.
410 */
411 static void ata_force_xfermask(struct ata_device *dev)
412 {
413 int devno = dev->link->pmp + dev->devno;
414 int alt_devno = devno;
415 int i;
416
417 /* allow n.15/16 for devices attached to host port */
418 if (ata_is_host_link(dev->link))
419 alt_devno += 15;
420
421 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
422 const struct ata_force_ent *fe = &ata_force_tbl[i];
423 unsigned long pio_mask, mwdma_mask, udma_mask;
424
425 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
426 continue;
427
428 if (fe->device != -1 && fe->device != devno &&
429 fe->device != alt_devno)
430 continue;
431
432 if (!fe->param.xfer_mask)
433 continue;
434
435 ata_unpack_xfermask(fe->param.xfer_mask,
436 &pio_mask, &mwdma_mask, &udma_mask);
437 if (udma_mask)
438 dev->udma_mask = udma_mask;
439 else if (mwdma_mask) {
440 dev->udma_mask = 0;
441 dev->mwdma_mask = mwdma_mask;
442 } else {
443 dev->udma_mask = 0;
444 dev->mwdma_mask = 0;
445 dev->pio_mask = pio_mask;
446 }
447
448 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
449 fe->param.name);
450 return;
451 }
452 }
453
454 /**
455 * ata_force_horkage - force horkage according to libata.force
456 * @dev: ATA device of interest
457 *
458 * Force horkage according to libata.force and whine about it.
459 * For consistency with link selection, device number 15 selects
460 * the first device connected to the host link.
461 *
462 * LOCKING:
463 * EH context.
464 */
465 static void ata_force_horkage(struct ata_device *dev)
466 {
467 int devno = dev->link->pmp + dev->devno;
468 int alt_devno = devno;
469 int i;
470
471 /* allow n.15/16 for devices attached to host port */
472 if (ata_is_host_link(dev->link))
473 alt_devno += 15;
474
475 for (i = 0; i < ata_force_tbl_size; i++) {
476 const struct ata_force_ent *fe = &ata_force_tbl[i];
477
478 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
479 continue;
480
481 if (fe->device != -1 && fe->device != devno &&
482 fe->device != alt_devno)
483 continue;
484
485 if (!(~dev->horkage & fe->param.horkage_on) &&
486 !(dev->horkage & fe->param.horkage_off))
487 continue;
488
489 dev->horkage |= fe->param.horkage_on;
490 dev->horkage &= ~fe->param.horkage_off;
491
492 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
493 fe->param.name);
494 }
495 }
496
497 /**
498 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
499 * @opcode: SCSI opcode
500 *
501 * Determine ATAPI command type from @opcode.
502 *
503 * LOCKING:
504 * None.
505 *
506 * RETURNS:
507 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
508 */
509 int atapi_cmd_type(u8 opcode)
510 {
511 switch (opcode) {
512 case GPCMD_READ_10:
513 case GPCMD_READ_12:
514 return ATAPI_READ;
515
516 case GPCMD_WRITE_10:
517 case GPCMD_WRITE_12:
518 case GPCMD_WRITE_AND_VERIFY_10:
519 return ATAPI_WRITE;
520
521 case GPCMD_READ_CD:
522 case GPCMD_READ_CD_MSF:
523 return ATAPI_READ_CD;
524
525 case ATA_16:
526 case ATA_12:
527 if (atapi_passthru16)
528 return ATAPI_PASS_THRU;
529 /* fall thru */
530 default:
531 return ATAPI_MISC;
532 }
533 }
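/*
 * Usage sketch (editor's illustration): classifying a SCSI opcode before
 * deciding how an ATAPI command must be handled.  The scmd and use_pio
 * names are hypothetical; the opcode is taken from the SCSI CDB.
 *
 *	int type = atapi_cmd_type(scmd->cmnd[0]);
 *
 *	if (type == ATAPI_MISC)
 *		use_pio = true;		// odd commands are safest done in PIO
 */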
534
535 /**
536 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
537 * @tf: Taskfile to convert
538 * @pmp: Port multiplier port
539 * @is_cmd: This FIS is for command
540  * @fis: Buffer into which data will be output
541 *
542 * Converts a standard ATA taskfile to a Serial ATA
543 * FIS structure (Register - Host to Device).
544 *
545 * LOCKING:
546 * Inherited from caller.
547 */
548 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
549 {
550 fis[0] = 0x27; /* Register - Host to Device FIS */
551 fis[1] = pmp & 0xf; /* Port multiplier number*/
552 if (is_cmd)
553 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
554
555 fis[2] = tf->command;
556 fis[3] = tf->feature;
557
558 fis[4] = tf->lbal;
559 fis[5] = tf->lbam;
560 fis[6] = tf->lbah;
561 fis[7] = tf->device;
562
563 fis[8] = tf->hob_lbal;
564 fis[9] = tf->hob_lbam;
565 fis[10] = tf->hob_lbah;
566 fis[11] = tf->hob_feature;
567
568 fis[12] = tf->nsect;
569 fis[13] = tf->hob_nsect;
570 fis[14] = 0;
571 fis[15] = tf->ctl;
572
573 fis[16] = tf->auxiliary & 0xff;
574 fis[17] = (tf->auxiliary >> 8) & 0xff;
575 fis[18] = (tf->auxiliary >> 16) & 0xff;
576 fis[19] = (tf->auxiliary >> 24) & 0xff;
577 }
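/*
 * Usage sketch (editor's illustration): serializing a prepared taskfile into
 * a 20-byte Register - Host to Device FIS the way an AHCI-style driver does
 * before handing it to the controller.  Assumes @qc is a valid
 * struct ata_queued_cmd:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *	// fis[0] == 0x27 and bit 7 of fis[1] is set: this is a command FIS
 */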
578
579 /**
580 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
581 * @fis: Buffer from which data will be input
582 * @tf: Taskfile to output
583 *
584 * Converts a serial ATA FIS structure to a standard ATA taskfile.
585 *
586 * LOCKING:
587 * Inherited from caller.
588 */
589
590 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
591 {
592 tf->command = fis[2]; /* status */
593 tf->feature = fis[3]; /* error */
594
595 tf->lbal = fis[4];
596 tf->lbam = fis[5];
597 tf->lbah = fis[6];
598 tf->device = fis[7];
599
600 tf->hob_lbal = fis[8];
601 tf->hob_lbam = fis[9];
602 tf->hob_lbah = fis[10];
603
604 tf->nsect = fis[12];
605 tf->hob_nsect = fis[13];
606 }
607
608 static const u8 ata_rw_cmds[] = {
609 /* pio multi */
610 ATA_CMD_READ_MULTI,
611 ATA_CMD_WRITE_MULTI,
612 ATA_CMD_READ_MULTI_EXT,
613 ATA_CMD_WRITE_MULTI_EXT,
614 0,
615 0,
616 0,
617 ATA_CMD_WRITE_MULTI_FUA_EXT,
618 /* pio */
619 ATA_CMD_PIO_READ,
620 ATA_CMD_PIO_WRITE,
621 ATA_CMD_PIO_READ_EXT,
622 ATA_CMD_PIO_WRITE_EXT,
623 0,
624 0,
625 0,
626 0,
627 /* dma */
628 ATA_CMD_READ,
629 ATA_CMD_WRITE,
630 ATA_CMD_READ_EXT,
631 ATA_CMD_WRITE_EXT,
632 0,
633 0,
634 0,
635 ATA_CMD_WRITE_FUA_EXT
636 };
637
638 /**
639 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
640 * @tf: command to examine and configure
641 * @dev: device tf belongs to
642 *
643 * Examine the device configuration and tf->flags to calculate
644 * the proper read/write commands and protocol to use.
645 *
646 * LOCKING:
647 * caller.
648 */
649 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
650 {
651 u8 cmd;
652
653 int index, fua, lba48, write;
654
655 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
656 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
657 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
658
659 if (dev->flags & ATA_DFLAG_PIO) {
660 tf->protocol = ATA_PROT_PIO;
661 index = dev->multi_count ? 0 : 8;
662 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
663 /* Unable to use DMA due to host limitation */
664 tf->protocol = ATA_PROT_PIO;
665 index = dev->multi_count ? 0 : 8;
666 } else {
667 tf->protocol = ATA_PROT_DMA;
668 index = 16;
669 }
670
671 cmd = ata_rw_cmds[index + fua + lba48 + write];
672 if (cmd) {
673 tf->command = cmd;
674 return 0;
675 }
676 return -1;
677 }
678
679 /**
680 * ata_tf_read_block - Read block address from ATA taskfile
681 * @tf: ATA taskfile of interest
682 * @dev: ATA device @tf belongs to
683 *
684 * LOCKING:
685 * None.
686 *
687 * Read block address from @tf. This function can handle all
688 * three address formats - LBA, LBA48 and CHS. tf->protocol and
689 * flags select the address format to use.
690 *
691 * RETURNS:
692 * Block address read from @tf.
693 */
694 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
695 {
696 u64 block = 0;
697
698 if (tf->flags & ATA_TFLAG_LBA) {
699 if (tf->flags & ATA_TFLAG_LBA48) {
700 block |= (u64)tf->hob_lbah << 40;
701 block |= (u64)tf->hob_lbam << 32;
702 block |= (u64)tf->hob_lbal << 24;
703 } else
704 block |= (tf->device & 0xf) << 24;
705
706 block |= tf->lbah << 16;
707 block |= tf->lbam << 8;
708 block |= tf->lbal;
709 } else {
710 u32 cyl, head, sect;
711
712 cyl = tf->lbam | (tf->lbah << 8);
713 head = tf->device & 0xf;
714 sect = tf->lbal;
715
716 if (!sect) {
717 ata_dev_warn(dev,
718 "device reported invalid CHS sector 0\n");
719 sect = 1; /* oh well */
720 }
721
722 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
723 }
724
725 return block;
726 }
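/*
 * Worked example (editor's illustration) of the CHS branch above: with
 * dev->heads == 16 and dev->sectors == 63, a taskfile carrying cyl == 2,
 * head == 3 and sect == 4 decodes to
 *
 *	block = (2 * 16 + 3) * 63 + 4 - 1 = 2208
 */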
727
728 /**
729 * ata_build_rw_tf - Build ATA taskfile for given read/write request
730 * @tf: Target ATA taskfile
731 * @dev: ATA device @tf belongs to
732 * @block: Block address
733 * @n_block: Number of blocks
734 * @tf_flags: RW/FUA etc...
735 * @tag: tag
736 *
737 * LOCKING:
738 * None.
739 *
740 * Build ATA taskfile @tf for read/write request described by
741 * @block, @n_block, @tf_flags and @tag on @dev.
742 *
743 * RETURNS:
744 *
745 * 0 on success, -ERANGE if the request is too large for @dev,
746 * -EINVAL if the request is invalid.
747 */
748 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
749 u64 block, u32 n_block, unsigned int tf_flags,
750 unsigned int tag)
751 {
752 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
753 tf->flags |= tf_flags;
754
755 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
756 /* yay, NCQ */
757 if (!lba_48_ok(block, n_block))
758 return -ERANGE;
759
760 tf->protocol = ATA_PROT_NCQ;
761 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
762
763 if (tf->flags & ATA_TFLAG_WRITE)
764 tf->command = ATA_CMD_FPDMA_WRITE;
765 else
766 tf->command = ATA_CMD_FPDMA_READ;
767
768 tf->nsect = tag << 3;
769 tf->hob_feature = (n_block >> 8) & 0xff;
770 tf->feature = n_block & 0xff;
771
772 tf->hob_lbah = (block >> 40) & 0xff;
773 tf->hob_lbam = (block >> 32) & 0xff;
774 tf->hob_lbal = (block >> 24) & 0xff;
775 tf->lbah = (block >> 16) & 0xff;
776 tf->lbam = (block >> 8) & 0xff;
777 tf->lbal = block & 0xff;
778
779 tf->device = ATA_LBA;
780 if (tf->flags & ATA_TFLAG_FUA)
781 tf->device |= 1 << 7;
782 } else if (dev->flags & ATA_DFLAG_LBA) {
783 tf->flags |= ATA_TFLAG_LBA;
784
785 if (lba_28_ok(block, n_block)) {
786 /* use LBA28 */
787 tf->device |= (block >> 24) & 0xf;
788 } else if (lba_48_ok(block, n_block)) {
789 if (!(dev->flags & ATA_DFLAG_LBA48))
790 return -ERANGE;
791
792 /* use LBA48 */
793 tf->flags |= ATA_TFLAG_LBA48;
794
795 tf->hob_nsect = (n_block >> 8) & 0xff;
796
797 tf->hob_lbah = (block >> 40) & 0xff;
798 tf->hob_lbam = (block >> 32) & 0xff;
799 tf->hob_lbal = (block >> 24) & 0xff;
800 } else
801 /* request too large even for LBA48 */
802 return -ERANGE;
803
804 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
805 return -EINVAL;
806
807 tf->nsect = n_block & 0xff;
808
809 tf->lbah = (block >> 16) & 0xff;
810 tf->lbam = (block >> 8) & 0xff;
811 tf->lbal = block & 0xff;
812
813 tf->device |= ATA_LBA;
814 } else {
815 /* CHS */
816 u32 sect, head, cyl, track;
817
818 /* The request -may- be too large for CHS addressing. */
819 if (!lba_28_ok(block, n_block))
820 return -ERANGE;
821
822 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
823 return -EINVAL;
824
825 /* Convert LBA to CHS */
826 track = (u32)block / dev->sectors;
827 cyl = track / dev->heads;
828 head = track % dev->heads;
829 sect = (u32)block % dev->sectors + 1;
830
831 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
832 (u32)block, track, cyl, head, sect);
833
834 /* Check whether the converted CHS can fit.
835 Cylinder: 0-65535
836 Head: 0-15
837 			   Sector: 1-255 */
838 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
839 return -ERANGE;
840
841 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
842 tf->lbal = sect;
843 tf->lbam = cyl;
844 tf->lbah = cyl >> 8;
845 tf->device |= head;
846 }
847
848 return 0;
849 }
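/*
 * Usage sketch (editor's illustration): building a taskfile for a 64-block
 * FUA write starting at LBA 1024.  Error handling trimmed; @tag only
 * matters when NCQ is enabled on @dev.
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	if (ata_build_rw_tf(&tf, dev, 1024, 64,
 *			    ATA_TFLAG_WRITE | ATA_TFLAG_FUA, tag))
 *		return -EINVAL;		// too large or invalid for this device
 */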
850
851 /**
852 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
853 * @pio_mask: pio_mask
854 * @mwdma_mask: mwdma_mask
855 * @udma_mask: udma_mask
856 *
857 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
858 * unsigned int xfer_mask.
859 *
860 * LOCKING:
861 * None.
862 *
863 * RETURNS:
864 * Packed xfer_mask.
865 */
866 unsigned long ata_pack_xfermask(unsigned long pio_mask,
867 unsigned long mwdma_mask,
868 unsigned long udma_mask)
869 {
870 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
871 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
872 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
873 }
874
875 /**
876 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
877 * @xfer_mask: xfer_mask to unpack
878 * @pio_mask: resulting pio_mask
879 * @mwdma_mask: resulting mwdma_mask
880 * @udma_mask: resulting udma_mask
881 *
882 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
883  *	Any NULL destination masks will be ignored.
884 */
885 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
886 unsigned long *mwdma_mask, unsigned long *udma_mask)
887 {
888 if (pio_mask)
889 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
890 if (mwdma_mask)
891 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
892 if (udma_mask)
893 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
894 }
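/*
 * Usage sketch (editor's illustration): packing and unpacking transfer
 * masks.  The bit positions come from ATA_SHIFT_* and ATA_MASK_* in
 * <linux/ata.h>.
 *
 *	unsigned long xfer, pio, mwdma, udma;
 *
 *	xfer = ata_pack_xfermask(0x1f, 0x07, 0x3f);	// PIO0-4, MWDMA0-2, UDMA0-5
 *	ata_unpack_xfermask(xfer, &pio, &mwdma, &udma);
 *	// pio == 0x1f, mwdma == 0x07 and udma == 0x3f again
 */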
895
896 static const struct ata_xfer_ent {
897 int shift, bits;
898 u8 base;
899 } ata_xfer_tbl[] = {
900 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
901 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
902 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
903 { -1, },
904 };
905
906 /**
907 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
908 * @xfer_mask: xfer_mask of interest
909 *
910 * Return matching XFER_* value for @xfer_mask. Only the highest
911 * bit of @xfer_mask is considered.
912 *
913 * LOCKING:
914 * None.
915 *
916 * RETURNS:
917 * Matching XFER_* value, 0xff if no match found.
918 */
919 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
920 {
921 int highbit = fls(xfer_mask) - 1;
922 const struct ata_xfer_ent *ent;
923
924 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
925 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
926 return ent->base + highbit - ent->shift;
927 return 0xff;
928 }
929
930 /**
931 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
932 * @xfer_mode: XFER_* of interest
933 *
934 * Return matching xfer_mask for @xfer_mode.
935 *
936 * LOCKING:
937 * None.
938 *
939 * RETURNS:
940 * Matching xfer_mask, 0 if no match found.
941 */
942 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
943 {
944 const struct ata_xfer_ent *ent;
945
946 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
947 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
948 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
949 & ~((1 << ent->shift) - 1);
950 return 0;
951 }
952
953 /**
954 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
955 * @xfer_mode: XFER_* of interest
956 *
957 * Return matching xfer_shift for @xfer_mode.
958 *
959 * LOCKING:
960 * None.
961 *
962 * RETURNS:
963 * Matching xfer_shift, -1 if no match found.
964 */
965 int ata_xfer_mode2shift(unsigned long xfer_mode)
966 {
967 const struct ata_xfer_ent *ent;
968
969 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
970 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
971 return ent->shift;
972 return -1;
973 }
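/*
 * Worked example (editor's illustration) tying the three helpers above
 * together, assuming the ATA_UDMA5 mask constant from <linux/ata.h>:
 *
 *	unsigned long xfer = ata_pack_xfermask(0, 0, ATA_UDMA5);
 *	u8 mode = ata_xfer_mask2mode(xfer);		// XFER_UDMA_5
 *	int shift = ata_xfer_mode2shift(mode);		// ATA_SHIFT_UDMA
 *	unsigned long back = ata_xfer_mode2mask(mode);	// UDMA0..UDMA5, packed
 */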
974
975 /**
976 * ata_mode_string - convert xfer_mask to string
977 * @xfer_mask: mask of bits supported; only highest bit counts.
978 *
979 * Determine string which represents the highest speed
980  *	(highest bit in @xfer_mask).
981 *
982 * LOCKING:
983 * None.
984 *
985 * RETURNS:
986 * Constant C string representing highest speed listed in
987  *	@xfer_mask, or the constant C string "<n/a>".
988 */
989 const char *ata_mode_string(unsigned long xfer_mask)
990 {
991 static const char * const xfer_mode_str[] = {
992 "PIO0",
993 "PIO1",
994 "PIO2",
995 "PIO3",
996 "PIO4",
997 "PIO5",
998 "PIO6",
999 "MWDMA0",
1000 "MWDMA1",
1001 "MWDMA2",
1002 "MWDMA3",
1003 "MWDMA4",
1004 "UDMA/16",
1005 "UDMA/25",
1006 "UDMA/33",
1007 "UDMA/44",
1008 "UDMA/66",
1009 "UDMA/100",
1010 "UDMA/133",
1011 "UDMA7",
1012 };
1013 int highbit;
1014
1015 highbit = fls(xfer_mask) - 1;
1016 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1017 return xfer_mode_str[highbit];
1018 return "<n/a>";
1019 }
1020
1021 const char *sata_spd_string(unsigned int spd)
1022 {
1023 static const char * const spd_str[] = {
1024 "1.5 Gbps",
1025 "3.0 Gbps",
1026 "6.0 Gbps",
1027 };
1028
1029 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1030 return "<unknown>";
1031 return spd_str[spd - 1];
1032 }
1033
1034 /**
1035 * ata_dev_classify - determine device type based on ATA-spec signature
1036 * @tf: ATA taskfile register set for device to be identified
1037 *
1038 * Determine from taskfile register contents whether a device is
1039 * ATA or ATAPI, as per "Signature and persistence" section
1040 * of ATA/PI spec (volume 1, sect 5.14).
1041 *
1042 * LOCKING:
1043 * None.
1044 *
1045 * RETURNS:
1046 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1047  *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1048 */
1049 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1050 {
1051 /* Apple's open source Darwin code hints that some devices only
1052 * put a proper signature into the LBA mid/high registers,
1053 	 * so we only check those.  It's sufficient for uniqueness.
1054 *
1055 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1056 * signatures for ATA and ATAPI devices attached on SerialATA,
1057 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1058 	 * spec has never mentioned using different signatures
1059 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1060 * Multiplier specification began to use 0x69/0x96 to identify
1061 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1062 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1063 * 0x69/0x96 shortly and described them as reserved for
1064 * SerialATA.
1065 *
1066 * We follow the current spec and consider that 0x69/0x96
1067 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1068 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1069 * SEMB signature. This is worked around in
1070 * ata_dev_read_id().
1071 */
1072 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1073 DPRINTK("found ATA device by sig\n");
1074 return ATA_DEV_ATA;
1075 }
1076
1077 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1078 DPRINTK("found ATAPI device by sig\n");
1079 return ATA_DEV_ATAPI;
1080 }
1081
1082 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1083 DPRINTK("found PMP device by sig\n");
1084 return ATA_DEV_PMP;
1085 }
1086
1087 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1088 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1089 return ATA_DEV_SEMB;
1090 }
1091
1092 if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1093 DPRINTK("found ZAC device by sig\n");
1094 return ATA_DEV_ZAC;
1095 }
1096
1097 DPRINTK("unknown device\n");
1098 return ATA_DEV_UNKNOWN;
1099 }
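/*
 * Usage sketch (editor's illustration): classifying a device from the
 * signature left in the shadow taskfile after reset, as the SFF and AHCI
 * reset paths do.  Assumes an SFF-style port so ->sff_tf_read is available.
 *
 *	struct ata_taskfile tf;
 *	unsigned int class;
 *
 *	ap->ops->sff_tf_read(ap, &tf);
 *	class = ata_dev_classify(&tf);	// ATA_DEV_ATA, _ATAPI, _PMP, ...
 */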
1100
1101 /**
1102 * ata_id_string - Convert IDENTIFY DEVICE page into string
1103 * @id: IDENTIFY DEVICE results we will examine
1104 * @s: string into which data is output
1105 * @ofs: offset into identify device page
1106 * @len: length of string to return. must be an even number.
1107 *
1108 * The strings in the IDENTIFY DEVICE page are broken up into
1109 * 16-bit chunks. Run through the string, and output each
1110 * 8-bit chunk linearly, regardless of platform.
1111 *
1112 * LOCKING:
1113 * caller.
1114 */
1115
1116 void ata_id_string(const u16 *id, unsigned char *s,
1117 unsigned int ofs, unsigned int len)
1118 {
1119 unsigned int c;
1120
1121 BUG_ON(len & 1);
1122
1123 while (len > 0) {
1124 c = id[ofs] >> 8;
1125 *s = c;
1126 s++;
1127
1128 c = id[ofs] & 0xff;
1129 *s = c;
1130 s++;
1131
1132 ofs++;
1133 len -= 2;
1134 }
1135 }
1136
1137 /**
1138 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1139 * @id: IDENTIFY DEVICE results we will examine
1140 * @s: string into which data is output
1141 * @ofs: offset into identify device page
1142 * @len: length of string to return. must be an odd number.
1143 *
1144 * This function is identical to ata_id_string except that it
1145 * trims trailing spaces and terminates the resulting string with
1146 * null. @len must be actual maximum length (even number) + 1.
1147 *
1148 * LOCKING:
1149 * caller.
1150 */
1151 void ata_id_c_string(const u16 *id, unsigned char *s,
1152 unsigned int ofs, unsigned int len)
1153 {
1154 unsigned char *p;
1155
1156 ata_id_string(id, s, ofs, len - 1);
1157
1158 p = s + strnlen(s, len - 1);
1159 while (p > s && p[-1] == ' ')
1160 p--;
1161 *p = '\0';
1162 }
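/*
 * Usage sketch (editor's illustration): extracting the model string from an
 * IDENTIFY page.  ATA_ID_PROD and ATA_ID_PROD_LEN come from <linux/ata.h>;
 * the buffer carries one extra byte for the terminating NUL.
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */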
1163
1164 static u64 ata_id_n_sectors(const u16 *id)
1165 {
1166 if (ata_id_has_lba(id)) {
1167 if (ata_id_has_lba48(id))
1168 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1169 else
1170 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1171 } else {
1172 if (ata_id_current_chs_valid(id))
1173 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1174 id[ATA_ID_CUR_SECTORS];
1175 else
1176 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1177 id[ATA_ID_SECTORS];
1178 }
1179 }
1180
1181 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1182 {
1183 u64 sectors = 0;
1184
1185 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1186 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1187 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1188 sectors |= (tf->lbah & 0xff) << 16;
1189 sectors |= (tf->lbam & 0xff) << 8;
1190 sectors |= (tf->lbal & 0xff);
1191
1192 return sectors;
1193 }
1194
1195 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1196 {
1197 u64 sectors = 0;
1198
1199 sectors |= (tf->device & 0x0f) << 24;
1200 sectors |= (tf->lbah & 0xff) << 16;
1201 sectors |= (tf->lbam & 0xff) << 8;
1202 sectors |= (tf->lbal & 0xff);
1203
1204 return sectors;
1205 }
1206
1207 /**
1208 * ata_read_native_max_address - Read native max address
1209 * @dev: target device
1210 * @max_sectors: out parameter for the result native max address
1211 *
1212 * Perform an LBA48 or LBA28 native size query upon the device in
1213 * question.
1214 *
1215 * RETURNS:
1216 * 0 on success, -EACCES if command is aborted by the drive.
1217 * -EIO on other errors.
1218 */
1219 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1220 {
1221 unsigned int err_mask;
1222 struct ata_taskfile tf;
1223 int lba48 = ata_id_has_lba48(dev->id);
1224
1225 ata_tf_init(dev, &tf);
1226
1227 /* always clear all address registers */
1228 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1229
1230 if (lba48) {
1231 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1232 tf.flags |= ATA_TFLAG_LBA48;
1233 } else
1234 tf.command = ATA_CMD_READ_NATIVE_MAX;
1235
1236 tf.protocol |= ATA_PROT_NODATA;
1237 tf.device |= ATA_LBA;
1238
1239 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1240 if (err_mask) {
1241 ata_dev_warn(dev,
1242 "failed to read native max address (err_mask=0x%x)\n",
1243 err_mask);
1244 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1245 return -EACCES;
1246 return -EIO;
1247 }
1248
1249 if (lba48)
1250 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1251 else
1252 *max_sectors = ata_tf_to_lba(&tf) + 1;
1253 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1254 (*max_sectors)--;
1255 return 0;
1256 }
1257
1258 /**
1259 * ata_set_max_sectors - Set max sectors
1260 * @dev: target device
1261 * @new_sectors: new max sectors value to set for the device
1262 *
1263 * Set max sectors of @dev to @new_sectors.
1264 *
1265 * RETURNS:
1266 * 0 on success, -EACCES if command is aborted or denied (due to
1267 * previous non-volatile SET_MAX) by the drive. -EIO on other
1268 * errors.
1269 */
1270 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1271 {
1272 unsigned int err_mask;
1273 struct ata_taskfile tf;
1274 int lba48 = ata_id_has_lba48(dev->id);
1275
1276 new_sectors--;
1277
1278 ata_tf_init(dev, &tf);
1279
1280 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1281
1282 if (lba48) {
1283 tf.command = ATA_CMD_SET_MAX_EXT;
1284 tf.flags |= ATA_TFLAG_LBA48;
1285
1286 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1287 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1288 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1289 } else {
1290 tf.command = ATA_CMD_SET_MAX;
1291
1292 tf.device |= (new_sectors >> 24) & 0xf;
1293 }
1294
1295 tf.protocol |= ATA_PROT_NODATA;
1296 tf.device |= ATA_LBA;
1297
1298 tf.lbal = (new_sectors >> 0) & 0xff;
1299 tf.lbam = (new_sectors >> 8) & 0xff;
1300 tf.lbah = (new_sectors >> 16) & 0xff;
1301
1302 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1303 if (err_mask) {
1304 ata_dev_warn(dev,
1305 "failed to set max address (err_mask=0x%x)\n",
1306 err_mask);
1307 if (err_mask == AC_ERR_DEV &&
1308 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1309 return -EACCES;
1310 return -EIO;
1311 }
1312
1313 return 0;
1314 }
1315
1316 /**
1317 * ata_hpa_resize - Resize a device with an HPA set
1318 * @dev: Device to resize
1319 *
1320 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1321 * it if required to the full size of the media. The caller must check
1322 * the drive has the HPA feature set enabled.
1323 *
1324 * RETURNS:
1325 * 0 on success, -errno on failure.
1326 */
1327 static int ata_hpa_resize(struct ata_device *dev)
1328 {
1329 struct ata_eh_context *ehc = &dev->link->eh_context;
1330 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1331 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1332 u64 sectors = ata_id_n_sectors(dev->id);
1333 u64 native_sectors;
1334 int rc;
1335
1336 /* do we need to do it? */
1337 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1338 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1339 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1340 return 0;
1341
1342 /* read native max address */
1343 rc = ata_read_native_max_address(dev, &native_sectors);
1344 if (rc) {
1345 /* If device aborted the command or HPA isn't going to
1346 * be unlocked, skip HPA resizing.
1347 */
1348 if (rc == -EACCES || !unlock_hpa) {
1349 ata_dev_warn(dev,
1350 "HPA support seems broken, skipping HPA handling\n");
1351 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1352
1353 /* we can continue if device aborted the command */
1354 if (rc == -EACCES)
1355 rc = 0;
1356 }
1357
1358 return rc;
1359 }
1360 dev->n_native_sectors = native_sectors;
1361
1362 /* nothing to do? */
1363 if (native_sectors <= sectors || !unlock_hpa) {
1364 if (!print_info || native_sectors == sectors)
1365 return 0;
1366
1367 if (native_sectors > sectors)
1368 ata_dev_info(dev,
1369 "HPA detected: current %llu, native %llu\n",
1370 (unsigned long long)sectors,
1371 (unsigned long long)native_sectors);
1372 else if (native_sectors < sectors)
1373 ata_dev_warn(dev,
1374 "native sectors (%llu) is smaller than sectors (%llu)\n",
1375 (unsigned long long)native_sectors,
1376 (unsigned long long)sectors);
1377 return 0;
1378 }
1379
1380 /* let's unlock HPA */
1381 rc = ata_set_max_sectors(dev, native_sectors);
1382 if (rc == -EACCES) {
1383 /* if device aborted the command, skip HPA resizing */
1384 ata_dev_warn(dev,
1385 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1386 (unsigned long long)sectors,
1387 (unsigned long long)native_sectors);
1388 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1389 return 0;
1390 } else if (rc)
1391 return rc;
1392
1393 /* re-read IDENTIFY data */
1394 rc = ata_dev_reread_id(dev, 0);
1395 if (rc) {
1396 ata_dev_err(dev,
1397 "failed to re-read IDENTIFY data after HPA resizing\n");
1398 return rc;
1399 }
1400
1401 if (print_info) {
1402 u64 new_sectors = ata_id_n_sectors(dev->id);
1403 ata_dev_info(dev,
1404 "HPA unlocked: %llu -> %llu, native %llu\n",
1405 (unsigned long long)sectors,
1406 (unsigned long long)new_sectors,
1407 (unsigned long long)native_sectors);
1408 }
1409
1410 return 0;
1411 }
1412
1413 /**
1414 * ata_dump_id - IDENTIFY DEVICE info debugging output
1415 * @id: IDENTIFY DEVICE page to dump
1416 *
1417 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1418 * page.
1419 *
1420 * LOCKING:
1421 * caller.
1422 */
1423
1424 static inline void ata_dump_id(const u16 *id)
1425 {
1426 DPRINTK("49==0x%04x "
1427 "53==0x%04x "
1428 "63==0x%04x "
1429 "64==0x%04x "
1430 "75==0x%04x \n",
1431 id[49],
1432 id[53],
1433 id[63],
1434 id[64],
1435 id[75]);
1436 DPRINTK("80==0x%04x "
1437 "81==0x%04x "
1438 "82==0x%04x "
1439 "83==0x%04x "
1440 "84==0x%04x \n",
1441 id[80],
1442 id[81],
1443 id[82],
1444 id[83],
1445 id[84]);
1446 DPRINTK("88==0x%04x "
1447 "93==0x%04x\n",
1448 id[88],
1449 id[93]);
1450 }
1451
1452 /**
1453 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1454 * @id: IDENTIFY data to compute xfer mask from
1455 *
1456 * Compute the xfermask for this device. This is not as trivial
1457 * as it seems if we must consider early devices correctly.
1458 *
1459 * FIXME: pre IDE drive timing (do we care ?).
1460 *
1461 * LOCKING:
1462 * None.
1463 *
1464 * RETURNS:
1465 * Computed xfermask
1466 */
1467 unsigned long ata_id_xfermask(const u16 *id)
1468 {
1469 unsigned long pio_mask, mwdma_mask, udma_mask;
1470
1471 /* Usual case. Word 53 indicates word 64 is valid */
1472 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1473 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1474 pio_mask <<= 3;
1475 pio_mask |= 0x7;
1476 } else {
1477 /* If word 64 isn't valid then Word 51 high byte holds
1478 * the PIO timing number for the maximum. Turn it into
1479 * a mask.
1480 */
1481 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1482 if (mode < 5) /* Valid PIO range */
1483 pio_mask = (2 << mode) - 1;
1484 else
1485 pio_mask = 1;
1486
1487 /* But wait.. there's more. Design your standards by
1488 * committee and you too can get a free iordy field to
1489 		 * process.  However it's the speeds, not the modes, that
1490 * are supported... Note drivers using the timing API
1491 * will get this right anyway
1492 */
1493 }
1494
1495 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1496
1497 if (ata_id_is_cfa(id)) {
1498 /*
1499 * Process compact flash extended modes
1500 */
1501 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1502 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1503
1504 if (pio)
1505 pio_mask |= (1 << 5);
1506 if (pio > 1)
1507 pio_mask |= (1 << 6);
1508 if (dma)
1509 mwdma_mask |= (1 << 3);
1510 if (dma > 1)
1511 mwdma_mask |= (1 << 4);
1512 }
1513
1514 udma_mask = 0;
1515 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1516 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1517
1518 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1519 }
1520
1521 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1522 {
1523 struct completion *waiting = qc->private_data;
1524
1525 complete(waiting);
1526 }
1527
1528 static bool ata_valid_internal_tag(struct ata_port *ap, struct ata_device *dev,
1529 unsigned int tag)
1530 {
1531 if (!ap->scsi_host)
1532 return !test_and_set_bit(tag, &ap->sas_tag_allocated);
1533 return !dev->sdev ||
1534 !blk_queue_find_tag(dev->sdev->request_queue, tag);
1535 }
1536
1537 /**
1538 * ata_exec_internal_sg - execute libata internal command
1539 * @dev: Device to which the command is sent
1540 * @tf: Taskfile registers for the command and the result
1541 * @cdb: CDB for packet command
1542 * @dma_dir: Data transfer direction of the command
1543 * @sgl: sg list for the data buffer of the command
1544 * @n_elem: Number of sg entries
1545 * @timeout: Timeout in msecs (0 for default)
1546 *
1547 * Executes libata internal command with timeout. @tf contains
1548 * command on entry and result on return. Timeout and error
1549 * conditions are reported via return value. No recovery action
1550  *	is taken after a command times out.  It's the caller's duty to
1551 * clean up after timeout.
1552 *
1553 * LOCKING:
1554 * None. Should be called with kernel context, might sleep.
1555 *
1556 * RETURNS:
1557 * Zero on success, AC_ERR_* mask on failure
1558 */
1559 unsigned ata_exec_internal_sg(struct ata_device *dev,
1560 struct ata_taskfile *tf, const u8 *cdb,
1561 int dma_dir, struct scatterlist *sgl,
1562 unsigned int n_elem, unsigned long timeout)
1563 {
1564 struct ata_link *link = dev->link;
1565 struct ata_port *ap = link->ap;
1566 u8 command = tf->command;
1567 int auto_timeout = 0;
1568 struct ata_queued_cmd *qc;
1569 unsigned int tag, preempted_tag;
1570 u32 preempted_sactive, preempted_qc_active;
1571 int preempted_nr_active_links;
1572 DECLARE_COMPLETION_ONSTACK(wait);
1573 unsigned long flags;
1574 unsigned int err_mask;
1575 int rc;
1576
1577 spin_lock_irqsave(ap->lock, flags);
1578
1579 /* no internal command while frozen */
1580 if (ap->pflags & ATA_PFLAG_FROZEN) {
1581 spin_unlock_irqrestore(ap->lock, flags);
1582 return AC_ERR_SYSTEM;
1583 }
1584
1585 /* initialize internal qc */
1586
1587 /* XXX: Tag 0 is used for drivers with legacy EH as some
1588 * drivers choke if any other tag is given. This breaks
1589 * ata_tag_internal() test for those drivers. Don't use new
1590 * EH stuff without converting to it.
1591 */
1592 if (ap->ops->error_handler)
1593 tag = ATA_TAG_INTERNAL;
1594 else
1595 tag = 0;
1596
1597 BUG_ON(!ata_valid_internal_tag(ap, dev, tag));
1598 qc = __ata_qc_from_tag(ap, tag);
1599
1600 qc->tag = tag;
1601 qc->scsicmd = NULL;
1602 qc->ap = ap;
1603 qc->dev = dev;
1604 ata_qc_reinit(qc);
1605
1606 preempted_tag = link->active_tag;
1607 preempted_sactive = link->sactive;
1608 preempted_qc_active = ap->qc_active;
1609 preempted_nr_active_links = ap->nr_active_links;
1610 link->active_tag = ATA_TAG_POISON;
1611 link->sactive = 0;
1612 ap->qc_active = 0;
1613 ap->nr_active_links = 0;
1614
1615 /* prepare & issue qc */
1616 qc->tf = *tf;
1617 if (cdb)
1618 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1619
1620 /* some SATA bridges need us to indicate data xfer direction */
1621 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1622 dma_dir == DMA_FROM_DEVICE)
1623 qc->tf.feature |= ATAPI_DMADIR;
1624
1625 qc->flags |= ATA_QCFLAG_RESULT_TF;
1626 qc->dma_dir = dma_dir;
1627 if (dma_dir != DMA_NONE) {
1628 unsigned int i, buflen = 0;
1629 struct scatterlist *sg;
1630
1631 for_each_sg(sgl, sg, n_elem, i)
1632 buflen += sg->length;
1633
1634 ata_sg_init(qc, sgl, n_elem);
1635 qc->nbytes = buflen;
1636 }
1637
1638 qc->private_data = &wait;
1639 qc->complete_fn = ata_qc_complete_internal;
1640
1641 ata_qc_issue(qc);
1642
1643 spin_unlock_irqrestore(ap->lock, flags);
1644
1645 if (!timeout) {
1646 if (ata_probe_timeout)
1647 timeout = ata_probe_timeout * 1000;
1648 else {
1649 timeout = ata_internal_cmd_timeout(dev, command);
1650 auto_timeout = 1;
1651 }
1652 }
1653
1654 if (ap->ops->error_handler)
1655 ata_eh_release(ap);
1656
1657 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1658
1659 if (ap->ops->error_handler)
1660 ata_eh_acquire(ap);
1661
1662 ata_sff_flush_pio_task(ap);
1663
1664 if (!rc) {
1665 spin_lock_irqsave(ap->lock, flags);
1666
1667 /* We're racing with irq here. If we lose, the
1668 * following test prevents us from completing the qc
1669 * twice. If we win, the port is frozen and will be
1670 * cleaned up by ->post_internal_cmd().
1671 */
1672 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1673 qc->err_mask |= AC_ERR_TIMEOUT;
1674
1675 if (ap->ops->error_handler)
1676 ata_port_freeze(ap);
1677 else
1678 ata_qc_complete(qc);
1679
1680 if (ata_msg_warn(ap))
1681 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1682 command);
1683 }
1684
1685 spin_unlock_irqrestore(ap->lock, flags);
1686 }
1687
1688 /* do post_internal_cmd */
1689 if (ap->ops->post_internal_cmd)
1690 ap->ops->post_internal_cmd(qc);
1691
1692 /* perform minimal error analysis */
1693 if (qc->flags & ATA_QCFLAG_FAILED) {
1694 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1695 qc->err_mask |= AC_ERR_DEV;
1696
1697 if (!qc->err_mask)
1698 qc->err_mask |= AC_ERR_OTHER;
1699
1700 if (qc->err_mask & ~AC_ERR_OTHER)
1701 qc->err_mask &= ~AC_ERR_OTHER;
1702 }
1703
1704 /* finish up */
1705 spin_lock_irqsave(ap->lock, flags);
1706
1707 *tf = qc->result_tf;
1708 err_mask = qc->err_mask;
1709
1710 ata_qc_free(qc);
1711 link->active_tag = preempted_tag;
1712 link->sactive = preempted_sactive;
1713 ap->qc_active = preempted_qc_active;
1714 ap->nr_active_links = preempted_nr_active_links;
1715
1716 spin_unlock_irqrestore(ap->lock, flags);
1717
1718 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1719 ata_internal_cmd_timed_out(dev, command);
1720
1721 return err_mask;
1722 }
1723
1724 /**
1725 * ata_exec_internal - execute libata internal command
1726 * @dev: Device to which the command is sent
1727 * @tf: Taskfile registers for the command and the result
1728 * @cdb: CDB for packet command
1729 * @dma_dir: Data transfer direction of the command
1730 * @buf: Data buffer of the command
1731 * @buflen: Length of data buffer
1732 * @timeout: Timeout in msecs (0 for default)
1733 *
1734 * Wrapper around ata_exec_internal_sg() which takes simple
1735 * buffer instead of sg list.
1736 *
1737 * LOCKING:
1738 * None. Should be called with kernel context, might sleep.
1739 *
1740 * RETURNS:
1741 * Zero on success, AC_ERR_* mask on failure
1742 */
1743 unsigned ata_exec_internal(struct ata_device *dev,
1744 struct ata_taskfile *tf, const u8 *cdb,
1745 int dma_dir, void *buf, unsigned int buflen,
1746 unsigned long timeout)
1747 {
1748 struct scatterlist *psg = NULL, sg;
1749 unsigned int n_elem = 0;
1750
1751 if (dma_dir != DMA_NONE) {
1752 WARN_ON(!buf);
1753 sg_init_one(&sg, buf, buflen);
1754 psg = &sg;
1755 n_elem++;
1756 }
1757
1758 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1759 timeout);
1760 }
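/*
 * Usage sketch (editor's illustration): issuing an internal PIO data-in
 * command, much like ata_do_dev_read_id() further below does for IDENTIFY.
 *
 *	struct ata_taskfile tf;
 *	u16 id[ATA_ID_WORDS];
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     id, sizeof(id), 0);
 *	if (err_mask)
 *		return -EIO;		// inspect AC_ERR_* bits as needed
 */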
1761
1762 /**
1763 * ata_do_simple_cmd - execute simple internal command
1764 * @dev: Device to which the command is sent
1765 * @cmd: Opcode to execute
1766 *
1767 * Execute a 'simple' command, that only consists of the opcode
1768 * 'cmd' itself, without filling any other registers
1769 *
1770 * LOCKING:
1771 * Kernel thread context (may sleep).
1772 *
1773 * RETURNS:
1774 * Zero on success, AC_ERR_* mask on failure
1775 */
1776 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1777 {
1778 struct ata_taskfile tf;
1779
1780 ata_tf_init(dev, &tf);
1781
1782 tf.command = cmd;
1783 tf.flags |= ATA_TFLAG_DEVICE;
1784 tf.protocol = ATA_PROT_NODATA;
1785
1786 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1787 }
1788
1789 /**
1790 * ata_pio_need_iordy - check if iordy needed
1791 * @adev: ATA device
1792 *
1793 * Check if the current speed of the device requires IORDY. Used
1794 * by various controllers for chip configuration.
1795 */
1796 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1797 {
1798 /* Don't set IORDY if we're preparing for reset. IORDY may
1799 * lead to controller lock up on certain controllers if the
1800 * port is not occupied. See bko#11703 for details.
1801 */
1802 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1803 return 0;
1804 /* Controller doesn't support IORDY. Probably a pointless
1805 * check as the caller should know this.
1806 */
1807 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1808 return 0;
1809 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1810 if (ata_id_is_cfa(adev->id)
1811 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1812 return 0;
1813 /* PIO3 and higher it is mandatory */
1814 if (adev->pio_mode > XFER_PIO_2)
1815 return 1;
1816 /* We turn it on when possible */
1817 if (ata_id_has_iordy(adev->id))
1818 return 1;
1819 return 0;
1820 }
1821
1822 /**
1823 * ata_pio_mask_no_iordy - Return the non IORDY mask
1824 * @adev: ATA device
1825 *
1826 * Compute the highest mode possible if we are not using iordy. Return
1827 * -1 if no iordy mode is available.
1828 */
1829 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1830 {
1831 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1832 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1833 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1834 /* Is the speed faster than the drive allows non IORDY ? */
1835 if (pio) {
1836 /* This is cycle times not frequency - watch the logic! */
1837 if (pio > 240) /* PIO2 is 240nS per cycle */
1838 return 3 << ATA_SHIFT_PIO;
1839 return 7 << ATA_SHIFT_PIO;
1840 }
1841 }
1842 return 3 << ATA_SHIFT_PIO;
1843 }
1844
1845 /**
1846 * ata_do_dev_read_id - default ID read method
1847 * @dev: device
1848 * @tf: proposed taskfile
1849 * @id: data buffer
1850 *
1851 * Issue the identify taskfile and hand back the buffer containing
1852 * identify data. For some RAID controllers and for pre ATA devices
1853 * this function is wrapped or replaced by the driver
1854 */
1855 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1856 struct ata_taskfile *tf, u16 *id)
1857 {
1858 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1859 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1860 }
1861
1862 /**
1863 * ata_dev_read_id - Read ID data from the specified device
1864 * @dev: target device
1865 * @p_class: pointer to class of the target device (may be changed)
1866 * @flags: ATA_READID_* flags
1867 * @id: buffer to read IDENTIFY data into
1868 *
1869 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1870 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1871 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1872 * for pre-ATA4 drives.
1873 *
1874 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1875 * now we abort if we hit that case.
1876 *
1877 * LOCKING:
1878 * Kernel thread context (may sleep)
1879 *
1880 * RETURNS:
1881 * 0 on success, -errno otherwise.
1882 */
1883 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1884 unsigned int flags, u16 *id)
1885 {
1886 struct ata_port *ap = dev->link->ap;
1887 unsigned int class = *p_class;
1888 struct ata_taskfile tf;
1889 unsigned int err_mask = 0;
1890 const char *reason;
1891 bool is_semb = class == ATA_DEV_SEMB;
1892 int may_fallback = 1, tried_spinup = 0;
1893 int rc;
1894
1895 if (ata_msg_ctl(ap))
1896 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1897
1898 retry:
1899 ata_tf_init(dev, &tf);
1900
1901 switch (class) {
1902 case ATA_DEV_SEMB:
1903 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1904 case ATA_DEV_ATA:
1905 case ATA_DEV_ZAC:
1906 tf.command = ATA_CMD_ID_ATA;
1907 break;
1908 case ATA_DEV_ATAPI:
1909 tf.command = ATA_CMD_ID_ATAPI;
1910 break;
1911 default:
1912 rc = -ENODEV;
1913 reason = "unsupported class";
1914 goto err_out;
1915 }
1916
1917 tf.protocol = ATA_PROT_PIO;
1918
1919 /* Some devices choke if TF registers contain garbage. Make
1920 * sure those are properly initialized.
1921 */
1922 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1923
1924 /* Device presence detection is unreliable on some
1925 * controllers. Always poll IDENTIFY if available.
1926 */
1927 tf.flags |= ATA_TFLAG_POLLING;
1928
1929 if (ap->ops->read_id)
1930 err_mask = ap->ops->read_id(dev, &tf, id);
1931 else
1932 err_mask = ata_do_dev_read_id(dev, &tf, id);
1933
1934 if (err_mask) {
1935 if (err_mask & AC_ERR_NODEV_HINT) {
1936 ata_dev_dbg(dev, "NODEV after polling detection\n");
1937 return -ENOENT;
1938 }
1939
1940 if (is_semb) {
1941 ata_dev_info(dev,
1942 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1943 /* SEMB is not supported yet */
1944 *p_class = ATA_DEV_SEMB_UNSUP;
1945 return 0;
1946 }
1947
1948 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1949 /* Device or controller might have reported
1950 * the wrong device class. Give a shot at the
1951 * other IDENTIFY if the current one is
1952 * aborted by the device.
1953 */
1954 if (may_fallback) {
1955 may_fallback = 0;
1956
1957 if (class == ATA_DEV_ATA)
1958 class = ATA_DEV_ATAPI;
1959 else
1960 class = ATA_DEV_ATA;
1961 goto retry;
1962 }
1963
1964 /* Control reaches here iff the device aborted
1965 * both flavors of IDENTIFYs which happens
1966 * sometimes with phantom devices.
1967 */
1968 ata_dev_dbg(dev,
1969 "both IDENTIFYs aborted, assuming NODEV\n");
1970 return -ENOENT;
1971 }
1972
1973 rc = -EIO;
1974 reason = "I/O error";
1975 goto err_out;
1976 }
1977
1978 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1979 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1980 "class=%d may_fallback=%d tried_spinup=%d\n",
1981 class, may_fallback, tried_spinup);
1982 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1983 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1984 }
1985
1986 /* Falling back doesn't make sense if ID data was read
1987 * successfully at least once.
1988 */
1989 may_fallback = 0;
1990
1991 swap_buf_le16(id, ATA_ID_WORDS);
1992
1993 /* sanity check */
1994 rc = -EINVAL;
1995 reason = "device reports invalid type";
1996
1997 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1998 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1999 goto err_out;
2000 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
2001 ata_id_is_ata(id)) {
2002 ata_dev_dbg(dev,
2003 "host indicates ignore ATA devices, ignored\n");
2004 return -ENOENT;
2005 }
2006 } else {
2007 if (ata_id_is_ata(id))
2008 goto err_out;
2009 }
2010
2011 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2012 tried_spinup = 1;
2013 /*
2014 * Drive powered-up in standby mode, and requires a specific
2015 * SET_FEATURES spin-up subcommand before it will accept
2016 * anything other than the original IDENTIFY command.
2017 */
2018 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2019 if (err_mask && id[2] != 0x738c) {
2020 rc = -EIO;
2021 reason = "SPINUP failed";
2022 goto err_out;
2023 }
2024 /*
2025 * If the drive initially returned incomplete IDENTIFY info,
2026 * we now must reissue the IDENTIFY command.
2027 */
2028 if (id[2] == 0x37c8)
2029 goto retry;
2030 }
2031
2032 if ((flags & ATA_READID_POSTRESET) &&
2033 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2034 /*
2035 * The exact sequence expected by certain pre-ATA4 drives is:
2036 * SRST RESET
2037 * IDENTIFY (optional in early ATA)
2038 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2039 * anything else..
2040 * Some drives were very specific about that exact sequence.
2041 *
2042 * Note that ATA4 says lba is mandatory so the second check
2043 * should never trigger.
2044 */
2045 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2046 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2047 if (err_mask) {
2048 rc = -EIO;
2049 reason = "INIT_DEV_PARAMS failed";
2050 goto err_out;
2051 }
2052
2053 /* current CHS translation info (id[53-58]) might be
2054 * changed. reread the identify device info.
2055 */
2056 flags &= ~ATA_READID_POSTRESET;
2057 goto retry;
2058 }
2059 }
2060
2061 *p_class = class;
2062
2063 return 0;
2064
2065 err_out:
2066 if (ata_msg_warn(ap))
2067 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2068 reason, err_mask);
2069 return rc;
2070 }
2071
2072 static int ata_do_link_spd_horkage(struct ata_device *dev)
2073 {
2074 struct ata_link *plink = ata_dev_phys_link(dev);
2075 u32 target, target_limit;
2076
2077 if (!sata_scr_valid(plink))
2078 return 0;
2079
2080 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2081 target = 1;
2082 else
2083 return 0;
2084
2085 target_limit = (1 << target) - 1;
2086
2087 /* if already on stricter limit, no need to push further */
2088 if (plink->sata_spd_limit <= target_limit)
2089 return 0;
2090
2091 plink->sata_spd_limit = target_limit;
2092
2093 /* Request another EH round by returning -EAGAIN if link is
2094 * going faster than the target speed. Forward progress is
2095 * guaranteed by setting sata_spd_limit to target_limit above.
2096 */
2097 if (plink->sata_spd > target) {
2098 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2099 sata_spd_string(target));
2100 return -EAGAIN;
2101 }
2102 return 0;
2103 }
2104
2105 static inline u8 ata_dev_knobble(struct ata_device *dev)
2106 {
2107 struct ata_port *ap = dev->link->ap;
2108
2109 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2110 return 0;
2111
2112 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2113 }
2114
2115 static int ata_dev_config_ncq(struct ata_device *dev,
2116 char *desc, size_t desc_sz)
2117 {
2118 struct ata_port *ap = dev->link->ap;
2119 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2120 unsigned int err_mask;
2121 char *aa_desc = "";
2122
2123 if (!ata_id_has_ncq(dev->id)) {
2124 desc[0] = '\0';
2125 return 0;
2126 }
2127 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2128 snprintf(desc, desc_sz, "NCQ (not used)");
2129 return 0;
2130 }
2131 if (ap->flags & ATA_FLAG_NCQ) {
2132 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2133 dev->flags |= ATA_DFLAG_NCQ;
2134 }
2135
2136 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2137 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2138 ata_id_has_fpdma_aa(dev->id)) {
2139 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2140 SATA_FPDMA_AA);
2141 if (err_mask) {
2142 ata_dev_err(dev,
2143 "failed to enable AA (error_mask=0x%x)\n",
2144 err_mask);
2145 if (err_mask != AC_ERR_DEV) {
2146 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2147 return -EIO;
2148 }
2149 } else
2150 aa_desc = ", AA";
2151 }
2152
2153 if (hdepth >= ddepth)
2154 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2155 else
2156 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2157 ddepth, aa_desc);
2158
2159 if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
2160 ata_id_has_ncq_send_and_recv(dev->id)) {
2161 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2162 0, ap->sector_buf, 1);
2163 if (err_mask) {
2164 ata_dev_dbg(dev,
2165 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2166 err_mask);
2167 } else {
2168 u8 *cmds = dev->ncq_send_recv_cmds;
2169
2170 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2171 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2172
2173 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2174 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2175 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2176 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2177 }
2178 }
2179 }
2180
2181 return 0;
2182 }
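/*
 * Illustrative example (not part of the original source, assuming
 * ATA_MAX_QUEUE is 32 as in contemporary kernels): a drive reporting a
 * queue depth of 32 behind a host whose scsi_host->can_queue is 31 gets
 * hdepth == 31 < ddepth == 32 and is described as "NCQ (depth 31/32)";
 * a drive reporting a depth of 16 on the same host satisfies
 * hdepth >= ddepth and is described simply as "NCQ (depth 16)".
 */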
2183
2184 /**
2185 * ata_dev_configure - Configure the specified ATA/ATAPI device
2186 * @dev: Target device to configure
2187 *
2188 * Configure @dev according to @dev->id. Generic and low-level
2189 * driver specific fixups are also applied.
2190 *
2191 * LOCKING:
2192 * Kernel thread context (may sleep)
2193 *
2194 * RETURNS:
2195 * 0 on success, -errno otherwise
2196 */
2197 int ata_dev_configure(struct ata_device *dev)
2198 {
2199 struct ata_port *ap = dev->link->ap;
2200 struct ata_eh_context *ehc = &dev->link->eh_context;
2201 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2202 const u16 *id = dev->id;
2203 unsigned long xfer_mask;
2204 unsigned int err_mask;
2205 char revbuf[7]; /* XYZ-99\0 */
2206 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2207 char modelbuf[ATA_ID_PROD_LEN+1];
2208 int rc;
2209
2210 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2211 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2212 return 0;
2213 }
2214
2215 if (ata_msg_probe(ap))
2216 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2217
2218 /* set horkage */
2219 dev->horkage |= ata_dev_blacklisted(dev);
2220 ata_force_horkage(dev);
2221
2222 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2223 ata_dev_info(dev, "unsupported device, disabling\n");
2224 ata_dev_disable(dev);
2225 return 0;
2226 }
2227
2228 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2229 dev->class == ATA_DEV_ATAPI) {
2230 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2231 atapi_enabled ? "not supported with this driver"
2232 : "disabled");
2233 ata_dev_disable(dev);
2234 return 0;
2235 }
2236
2237 rc = ata_do_link_spd_horkage(dev);
2238 if (rc)
2239 return rc;
2240
2241 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2242 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2243 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2244 dev->horkage |= ATA_HORKAGE_NOLPM;
2245
2246 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2247 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2248 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2249 }
2250
2251 /* let ACPI work its magic */
2252 rc = ata_acpi_on_devcfg(dev);
2253 if (rc)
2254 return rc;
2255
2256 /* massage HPA, do it early as it might change IDENTIFY data */
2257 rc = ata_hpa_resize(dev);
2258 if (rc)
2259 return rc;
2260
2261 /* print device capabilities */
2262 if (ata_msg_probe(ap))
2263 ata_dev_dbg(dev,
2264 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2265 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2266 __func__,
2267 id[49], id[82], id[83], id[84],
2268 id[85], id[86], id[87], id[88]);
2269
2270 /* initialize to-be-configured parameters */
2271 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2272 dev->max_sectors = 0;
2273 dev->cdb_len = 0;
2274 dev->n_sectors = 0;
2275 dev->cylinders = 0;
2276 dev->heads = 0;
2277 dev->sectors = 0;
2278 dev->multi_count = 0;
2279
2280 /*
2281 * common ATA, ATAPI feature tests
2282 */
2283
2284 /* find max transfer mode; for printk only */
2285 xfer_mask = ata_id_xfermask(id);
2286
2287 if (ata_msg_probe(ap))
2288 ata_dump_id(id);
2289
2290 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2291 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2292 sizeof(fwrevbuf));
2293
2294 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2295 sizeof(modelbuf));
2296
2297 /* ATA-specific feature tests */
2298 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2299 if (ata_id_is_cfa(id)) {
2300 /* CPRM may make this media unusable */
2301 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2302 ata_dev_warn(dev,
2303 "supports DRM functions and may not be fully accessible\n");
2304 snprintf(revbuf, 7, "CFA");
2305 } else {
2306 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2307 /* Warn the user if the device has TPM extensions */
2308 if (ata_id_has_tpm(id))
2309 ata_dev_warn(dev,
2310 "supports DRM functions and may not be fully accessible\n");
2311 }
2312
2313 dev->n_sectors = ata_id_n_sectors(id);
2314
2315 /* get current R/W Multiple count setting */
2316 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2317 unsigned int max = dev->id[47] & 0xff;
2318 unsigned int cnt = dev->id[59] & 0xff;
2319 /* only recognize/allow powers of two here */
2320 if (is_power_of_2(max) && is_power_of_2(cnt))
2321 if (cnt <= max)
2322 dev->multi_count = cnt;
2323 }
2324
2325 if (ata_id_has_lba(id)) {
2326 const char *lba_desc;
2327 char ncq_desc[24];
2328
2329 lba_desc = "LBA";
2330 dev->flags |= ATA_DFLAG_LBA;
2331 if (ata_id_has_lba48(id)) {
2332 dev->flags |= ATA_DFLAG_LBA48;
2333 lba_desc = "LBA48";
2334
2335 if (dev->n_sectors >= (1UL << 28) &&
2336 ata_id_has_flush_ext(id))
2337 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2338 }
2339
2340 /* config NCQ */
2341 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2342 if (rc)
2343 return rc;
2344
2345 /* print device info to dmesg */
2346 if (ata_msg_drv(ap) && print_info) {
2347 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2348 revbuf, modelbuf, fwrevbuf,
2349 ata_mode_string(xfer_mask));
2350 ata_dev_info(dev,
2351 "%llu sectors, multi %u: %s %s\n",
2352 (unsigned long long)dev->n_sectors,
2353 dev->multi_count, lba_desc, ncq_desc);
2354 }
2355 } else {
2356 /* CHS */
2357
2358 /* Default translation */
2359 dev->cylinders = id[1];
2360 dev->heads = id[3];
2361 dev->sectors = id[6];
2362
2363 if (ata_id_current_chs_valid(id)) {
2364 /* Current CHS translation is valid. */
2365 dev->cylinders = id[54];
2366 dev->heads = id[55];
2367 dev->sectors = id[56];
2368 }
2369
2370 /* print device info to dmesg */
2371 if (ata_msg_drv(ap) && print_info) {
2372 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2373 revbuf, modelbuf, fwrevbuf,
2374 ata_mode_string(xfer_mask));
2375 ata_dev_info(dev,
2376 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2377 (unsigned long long)dev->n_sectors,
2378 dev->multi_count, dev->cylinders,
2379 dev->heads, dev->sectors);
2380 }
2381 }
2382
2383 /* Check and mark DevSlp capability. Get DevSlp timing variables
2384 * from SATA Settings page of Identify Device Data Log.
2385 */
2386 if (ata_id_has_devslp(dev->id)) {
2387 u8 *sata_setting = ap->sector_buf;
2388 int i, j;
2389
2390 dev->flags |= ATA_DFLAG_DEVSLP;
2391 err_mask = ata_read_log_page(dev,
2392 ATA_LOG_SATA_ID_DEV_DATA,
2393 ATA_LOG_SATA_SETTINGS,
2394 sata_setting,
2395 1);
2396 if (err_mask)
2397 ata_dev_dbg(dev,
2398 "failed to get Identify Device Data, Emask 0x%x\n",
2399 err_mask);
2400 else
2401 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2402 j = ATA_LOG_DEVSLP_OFFSET + i;
2403 dev->devslp_timing[i] = sata_setting[j];
2404 }
2405 }
2406
2407 dev->cdb_len = 16;
2408 }
2409
2410 /* ATAPI-specific feature tests */
2411 else if (dev->class == ATA_DEV_ATAPI) {
2412 const char *cdb_intr_string = "";
2413 const char *atapi_an_string = "";
2414 const char *dma_dir_string = "";
2415 u32 sntf;
2416
2417 rc = atapi_cdb_len(id);
2418 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2419 if (ata_msg_warn(ap))
2420 ata_dev_warn(dev, "unsupported CDB len\n");
2421 rc = -EINVAL;
2422 goto err_out_nosup;
2423 }
2424 dev->cdb_len = (unsigned int) rc;
2425
2426 /* Enable ATAPI AN if both the host and device have
2427 * the support. If PMP is attached, SNTF is required
2428 * to enable ATAPI AN to discern between PHY status
2429 * changed notifications and ATAPI ANs.
2430 */
2431 if (atapi_an &&
2432 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2433 (!sata_pmp_attached(ap) ||
2434 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2435 /* issue SET feature command to turn this on */
2436 err_mask = ata_dev_set_feature(dev,
2437 SETFEATURES_SATA_ENABLE, SATA_AN);
2438 if (err_mask)
2439 ata_dev_err(dev,
2440 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2441 err_mask);
2442 else {
2443 dev->flags |= ATA_DFLAG_AN;
2444 atapi_an_string = ", ATAPI AN";
2445 }
2446 }
2447
2448 if (ata_id_cdb_intr(dev->id)) {
2449 dev->flags |= ATA_DFLAG_CDB_INTR;
2450 cdb_intr_string = ", CDB intr";
2451 }
2452
2453 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2454 dev->flags |= ATA_DFLAG_DMADIR;
2455 dma_dir_string = ", DMADIR";
2456 }
2457
2458 if (ata_id_has_da(dev->id)) {
2459 dev->flags |= ATA_DFLAG_DA;
2460 zpodd_init(dev);
2461 }
2462
2463 /* print device info to dmesg */
2464 if (ata_msg_drv(ap) && print_info)
2465 ata_dev_info(dev,
2466 "ATAPI: %s, %s, max %s%s%s%s\n",
2467 modelbuf, fwrevbuf,
2468 ata_mode_string(xfer_mask),
2469 cdb_intr_string, atapi_an_string,
2470 dma_dir_string);
2471 }
2472
2473 /* determine max_sectors */
2474 dev->max_sectors = ATA_MAX_SECTORS;
2475 if (dev->flags & ATA_DFLAG_LBA48)
2476 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2477
2478 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2479 200 sectors */
2480 if (ata_dev_knobble(dev)) {
2481 if (ata_msg_drv(ap) && print_info)
2482 ata_dev_info(dev, "applying bridge limits\n");
2483 dev->udma_mask &= ATA_UDMA5;
2484 dev->max_sectors = ATA_MAX_SECTORS;
2485 }
2486
2487 if ((dev->class == ATA_DEV_ATAPI) &&
2488 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2489 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2490 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2491 }
2492
2493 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2494 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2495 dev->max_sectors);
2496
2497 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2498 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2499
2500 if (ap->ops->dev_config)
2501 ap->ops->dev_config(dev);
2502
2503 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2504 /* Let the user know. We don't want to disallow opens for
2505 rescue purposes, or in case the vendor is just a blithering
2506 idiot. Do this after the dev_config call as some controllers
2507 with buggy firmware may want to avoid reporting false device
2508 bugs */
2509
2510 if (print_info) {
2511 ata_dev_warn(dev,
2512 "Drive reports diagnostics failure. This may indicate a drive\n");
2513 ata_dev_warn(dev,
2514 "fault or invalid emulation. Contact drive vendor for information.\n");
2515 }
2516 }
2517
2518 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2519 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2520 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2521 }
2522
2523 return 0;
2524
2525 err_out_nosup:
2526 if (ata_msg_probe(ap))
2527 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2528 return rc;
2529 }
2530
2531 /**
2532 * ata_cable_40wire - return 40 wire cable type
2533 * @ap: port
2534 *
2535 * Helper method for drivers which want to hardwire 40 wire cable
2536 * detection.
2537 */
2538
2539 int ata_cable_40wire(struct ata_port *ap)
2540 {
2541 return ATA_CBL_PATA40;
2542 }
2543
2544 /**
2545 * ata_cable_80wire - return 80 wire cable type
2546 * @ap: port
2547 *
2548 * Helper method for drivers which want to hardwire 80 wire cable
2549 * detection.
2550 */
2551
2552 int ata_cable_80wire(struct ata_port *ap)
2553 {
2554 return ATA_CBL_PATA80;
2555 }
2556
2557 /**
2558 * ata_cable_unknown - return unknown PATA cable.
2559 * @ap: port
2560 *
2561 * Helper method for drivers which have no PATA cable detection.
2562 */
2563
2564 int ata_cable_unknown(struct ata_port *ap)
2565 {
2566 return ATA_CBL_PATA_UNK;
2567 }
2568
2569 /**
2570 * ata_cable_ignore - return ignored PATA cable.
2571 * @ap: port
2572 *
2573 * Helper method for drivers which don't use cable type to limit
2574 * transfer mode.
2575 */
2576 int ata_cable_ignore(struct ata_port *ap)
2577 {
2578 return ATA_CBL_PATA_IGN;
2579 }
2580
2581 /**
2582 * ata_cable_sata - return SATA cable type
2583 * @ap: port
2584 *
2585 * Helper method for drivers which have SATA cables
2586 */
2587
2588 int ata_cable_sata(struct ata_port *ap)
2589 {
2590 return ATA_CBL_SATA;
2591 }
2592
2593 /**
2594 * ata_bus_probe - Reset and probe ATA bus
2595 * @ap: Bus to probe
2596 *
2597 * Master ATA bus probing function. Initiates a hardware-dependent
2598 * bus reset, then attempts to identify any devices found on
2599 * the bus.
2600 *
2601 * LOCKING:
2602 * PCI/etc. bus probe sem.
2603 *
2604 * RETURNS:
2605 * Zero on success, negative errno otherwise.
2606 */
2607
2608 int ata_bus_probe(struct ata_port *ap)
2609 {
2610 unsigned int classes[ATA_MAX_DEVICES];
2611 int tries[ATA_MAX_DEVICES];
2612 int rc;
2613 struct ata_device *dev;
2614
2615 ata_for_each_dev(dev, &ap->link, ALL)
2616 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2617
2618 retry:
2619 ata_for_each_dev(dev, &ap->link, ALL) {
2620 /* If we issue an SRST then an ATA drive (not ATAPI)
2621 * may change configuration and be in PIO0 timing. If
2622 * we do a hard reset (or are coming from power on)
2623 * this is true for ATA or ATAPI. Until we've set a
2624 * suitable controller mode we should not touch the
2625 * bus as we may be talking too fast.
2626 */
2627 dev->pio_mode = XFER_PIO_0;
2628 dev->dma_mode = 0xff;
2629
2630 /* If the controller has a pio mode setup function
2631 * then use it to set the chipset to rights. Don't
2632 * touch the DMA setup as that will be dealt with when
2633 * configuring devices.
2634 */
2635 if (ap->ops->set_piomode)
2636 ap->ops->set_piomode(ap, dev);
2637 }
2638
2639 /* reset and determine device classes */
2640 ap->ops->phy_reset(ap);
2641
2642 ata_for_each_dev(dev, &ap->link, ALL) {
2643 if (dev->class != ATA_DEV_UNKNOWN)
2644 classes[dev->devno] = dev->class;
2645 else
2646 classes[dev->devno] = ATA_DEV_NONE;
2647
2648 dev->class = ATA_DEV_UNKNOWN;
2649 }
2650
2651 /* read IDENTIFY page and configure devices. We have to do the identify
2652 specific sequence bass-ackwards so that PDIAG- is released by
2653 the slave device */
2654
2655 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2656 if (tries[dev->devno])
2657 dev->class = classes[dev->devno];
2658
2659 if (!ata_dev_enabled(dev))
2660 continue;
2661
2662 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2663 dev->id);
2664 if (rc)
2665 goto fail;
2666 }
2667
2668 /* Now ask for the cable type as PDIAG- should have been released */
2669 if (ap->ops->cable_detect)
2670 ap->cbl = ap->ops->cable_detect(ap);
2671
2672 /* We may have SATA bridge glue hiding here irrespective of
2673 * the reported cable types and sensed types. When SATA
2674 * drives indicate we have a bridge, we don't know which end
2675 * of the link the bridge is on, which is a problem.
2676 */
2677 ata_for_each_dev(dev, &ap->link, ENABLED)
2678 if (ata_id_is_sata(dev->id))
2679 ap->cbl = ATA_CBL_SATA;
2680
2681 /* After the identify sequence we can now set up the devices. We do
2682 this in the normal order so that the user doesn't get confused */
2683
2684 ata_for_each_dev(dev, &ap->link, ENABLED) {
2685 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2686 rc = ata_dev_configure(dev);
2687 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2688 if (rc)
2689 goto fail;
2690 }
2691
2692 /* configure transfer mode */
2693 rc = ata_set_mode(&ap->link, &dev);
2694 if (rc)
2695 goto fail;
2696
2697 ata_for_each_dev(dev, &ap->link, ENABLED)
2698 return 0;
2699
2700 return -ENODEV;
2701
2702 fail:
2703 tries[dev->devno]--;
2704
2705 switch (rc) {
2706 case -EINVAL:
2707 /* eeek, something went very wrong, give up */
2708 tries[dev->devno] = 0;
2709 break;
2710
2711 case -ENODEV:
2712 /* give it just one more chance */
2713 tries[dev->devno] = min(tries[dev->devno], 1); /* fall through */
2714 case -EIO:
2715 if (tries[dev->devno] == 1) {
2716 /* This is the last chance, better to slow
2717 * down than lose it.
2718 */
2719 sata_down_spd_limit(&ap->link, 0);
2720 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2721 }
2722 }
2723
2724 if (!tries[dev->devno])
2725 ata_dev_disable(dev);
2726
2727 goto retry;
2728 }
2729
2730 /**
2731 * sata_print_link_status - Print SATA link status
2732 * @link: SATA link to printk link status about
2733 *
2734 * This function prints link speed and status of a SATA link.
2735 *
2736 * LOCKING:
2737 * None.
2738 */
2739 static void sata_print_link_status(struct ata_link *link)
2740 {
2741 u32 sstatus, scontrol, tmp;
2742
2743 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2744 return;
2745 sata_scr_read(link, SCR_CONTROL, &scontrol);
2746
2747 if (ata_phys_link_online(link)) {
2748 tmp = (sstatus >> 4) & 0xf;
2749 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2750 sata_spd_string(tmp), sstatus, scontrol);
2751 } else {
2752 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2753 sstatus, scontrol);
2754 }
2755 }
2756
2757 /**
2758 * ata_dev_pair - return other device on cable
2759 * @adev: device
2760 *
2761 * Obtain the other device on the same cable; if none is
2762 * present, NULL is returned.
2763 */
2764
2765 struct ata_device *ata_dev_pair(struct ata_device *adev)
2766 {
2767 struct ata_link *link = adev->link;
2768 struct ata_device *pair = &link->device[1 - adev->devno];
2769 if (!ata_dev_enabled(pair))
2770 return NULL;
2771 return pair;
2772 }
2773
2774 /**
2775 * sata_down_spd_limit - adjust SATA spd limit downward
2776 * @link: Link to adjust SATA spd limit for
2777 * @spd_limit: Additional limit
2778 *
2779 * Adjust SATA spd limit of @link downward. Note that this
2780 * function only adjusts the limit. The change must be applied
2781 * using sata_set_spd().
2782 *
2783 * If @spd_limit is non-zero, the speed is limited to one equal to or
2784 * lower than @spd_limit if such a speed is supported. If
2785 * @spd_limit is slower than any supported speed, only the lowest
2786 * supported speed is allowed.
2787 *
2788 * LOCKING:
2789 * Inherited from caller.
2790 *
2791 * RETURNS:
2792 * 0 on success, negative errno on failure
2793 */
2794 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2795 {
2796 u32 sstatus, spd, mask;
2797 int rc, bit;
2798
2799 if (!sata_scr_valid(link))
2800 return -EOPNOTSUPP;
2801
2802 /* If SCR can be read, use it to determine the current SPD.
2803 * If not, use cached value in link->sata_spd.
2804 */
2805 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2806 if (rc == 0 && ata_sstatus_online(sstatus))
2807 spd = (sstatus >> 4) & 0xf;
2808 else
2809 spd = link->sata_spd;
2810
2811 mask = link->sata_spd_limit;
2812 if (mask <= 1)
2813 return -EINVAL;
2814
2815 /* unconditionally mask off the highest bit */
2816 bit = fls(mask) - 1;
2817 mask &= ~(1 << bit);
2818
2819 /* Mask off all speeds higher than or equal to the current
2820 * one. Force 1.5Gbps if current SPD is not available.
2821 */
2822 if (spd > 1)
2823 mask &= (1 << (spd - 1)) - 1;
2824 else
2825 mask &= 1;
2826
2827 /* were we already at the bottom? */
2828 if (!mask)
2829 return -EINVAL;
2830
2831 if (spd_limit) {
2832 if (mask & ((1 << spd_limit) - 1))
2833 mask &= (1 << spd_limit) - 1;
2834 else {
2835 bit = ffs(mask) - 1;
2836 mask = 1 << bit;
2837 }
2838 }
2839
2840 link->sata_spd_limit = mask;
2841
2842 ata_link_warn(link, "limiting SATA link speed to %s\n",
2843 sata_spd_string(fls(mask)));
2844
2845 return 0;
2846 }
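/*
 * Illustrative walk-through (not part of the original source): assume
 * link->sata_spd_limit == 0x7 (1.5, 3.0 and 6.0 Gbps all allowed), the
 * link currently runs at spd == 3 (6.0 Gbps) and @spd_limit == 0.
 * Dropping the highest bit leaves mask == 0x3; masking off speeds at or
 * above the current one keeps 0x3, so the new limit is 0x3 and the next
 * hardreset brings the link up at no more than 3.0 Gbps.
 */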
2847
2848 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2849 {
2850 struct ata_link *host_link = &link->ap->link;
2851 u32 limit, target, spd;
2852
2853 limit = link->sata_spd_limit;
2854
2855 /* Don't configure downstream link faster than upstream link.
2856 * It doesn't speed up anything and some PMPs choke on such
2857 * configuration.
2858 */
2859 if (!ata_is_host_link(link) && host_link->sata_spd)
2860 limit &= (1 << host_link->sata_spd) - 1;
2861
2862 if (limit == UINT_MAX)
2863 target = 0;
2864 else
2865 target = fls(limit);
2866
2867 spd = (*scontrol >> 4) & 0xf;
2868 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2869
2870 return spd != target;
2871 }
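/*
 * Illustrative example (not part of the original source): for a link
 * behind a PMP with sata_spd_limit == 0x7 while the host link runs at
 * 3.0 Gbps (host_link->sata_spd == 2), limit is clamped to
 * (1 << 2) - 1 == 0x3, so target == fls(0x3) == 2.  The SPD field of
 * *scontrol is rewritten to 2 and the function returns nonzero only if
 * that field previously held a different value.
 */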
2872
2873 /**
2874 * sata_set_spd_needed - is SATA spd configuration needed
2875 * @link: Link in question
2876 *
2877 * Test whether the spd limit in SControl matches
2878 * @link->sata_spd_limit. This function is used to determine
2879 * whether hardreset is necessary to apply SATA spd
2880 * configuration.
2881 *
2882 * LOCKING:
2883 * Inherited from caller.
2884 *
2885 * RETURNS:
2886 * 1 if SATA spd configuration is needed, 0 otherwise.
2887 */
2888 static int sata_set_spd_needed(struct ata_link *link)
2889 {
2890 u32 scontrol;
2891
2892 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2893 return 1;
2894
2895 return __sata_set_spd_needed(link, &scontrol);
2896 }
2897
2898 /**
2899 * sata_set_spd - set SATA spd according to spd limit
2900 * @link: Link to set SATA spd for
2901 *
2902 * Set SATA spd of @link according to sata_spd_limit.
2903 *
2904 * LOCKING:
2905 * Inherited from caller.
2906 *
2907 * RETURNS:
2908 * 0 if spd doesn't need to be changed, 1 if spd has been
2909 * changed. Negative errno if SCR registers are inaccessible.
2910 */
2911 int sata_set_spd(struct ata_link *link)
2912 {
2913 u32 scontrol;
2914 int rc;
2915
2916 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2917 return rc;
2918
2919 if (!__sata_set_spd_needed(link, &scontrol))
2920 return 0;
2921
2922 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2923 return rc;
2924
2925 return 1;
2926 }
2927
2928 /*
2929 * This mode timing computation functionality is ported over from
2930 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2931 */
2932 /*
2933 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2934 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2935 * for UDMA6, which is currently supported only by Maxtor drives.
2936 *
2937 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2938 */
2939
2940 static const struct ata_timing ata_timing[] = {
2941 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
2942 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
2943 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
2944 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
2945 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
2946 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
2947 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
2948 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
2949
2950 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
2951 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
2952 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
2953
2954 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
2955 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
2956 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
2957 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
2958 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
2959
2960 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2961 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
2962 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
2963 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
2964 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
2965 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
2966 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
2967 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
2968
2969 { 0xFF }
2970 };
2971
2972 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2973 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
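/*
 * Illustrative note (not part of the original source): ENOUGH() is a
 * ceiling division and EZ() additionally maps 0 to 0 so that unused
 * timing fields stay zero.  Assuming a PATA driver passes T as the bus
 * clock period in picoseconds (roughly 30000 ps for a 33 MHz clock),
 * the 70 ns XFER_PIO_0 setup time above quantizes to
 * EZ(70 * 1000, 30000) == (70000 - 1) / 30000 + 1 == 3 clock cycles.
 */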
2974
2975 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2976 {
2977 q->setup = EZ(t->setup * 1000, T);
2978 q->act8b = EZ(t->act8b * 1000, T);
2979 q->rec8b = EZ(t->rec8b * 1000, T);
2980 q->cyc8b = EZ(t->cyc8b * 1000, T);
2981 q->active = EZ(t->active * 1000, T);
2982 q->recover = EZ(t->recover * 1000, T);
2983 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
2984 q->cycle = EZ(t->cycle * 1000, T);
2985 q->udma = EZ(t->udma * 1000, UT);
2986 }
2987
2988 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2989 struct ata_timing *m, unsigned int what)
2990 {
2991 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2992 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2993 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2994 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2995 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2996 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2997 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
2998 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2999 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3000 }
3001
3002 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3003 {
3004 const struct ata_timing *t = ata_timing;
3005
3006 while (xfer_mode > t->mode)
3007 t++;
3008
3009 if (xfer_mode == t->mode)
3010 return t;
3011
3012 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3013 __func__, xfer_mode);
3014
3015 return NULL;
3016 }
3017
3018 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3019 struct ata_timing *t, int T, int UT)
3020 {
3021 const u16 *id = adev->id;
3022 const struct ata_timing *s;
3023 struct ata_timing p;
3024
3025 /*
3026 * Find the mode.
3027 */
3028
3029 if (!(s = ata_timing_find_mode(speed)))
3030 return -EINVAL;
3031
3032 memcpy(t, s, sizeof(*s));
3033
3034 /*
3035 * If the drive is an EIDE drive, it can tell us it needs extended
3036 * PIO/MW_DMA cycle timing.
3037 */
3038
3039 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3040 memset(&p, 0, sizeof(p));
3041
3042 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3043 if (speed <= XFER_PIO_2)
3044 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3045 else if ((speed <= XFER_PIO_4) ||
3046 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3047 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3048 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3049 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3050
3051 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3052 }
3053
3054 /*
3055 * Convert the timing to bus clock counts.
3056 */
3057
3058 ata_timing_quantize(t, t, T, UT);
3059
3060 /*
3061 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3062 * S.M.A.R.T. and some other commands. We have to ensure that the
3063 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3064 */
3065
3066 if (speed > XFER_PIO_6) {
3067 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3068 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3069 }
3070
3071 /*
3072 * Lengthen active & recovery time so that cycle time is correct.
3073 */
3074
3075 if (t->act8b + t->rec8b < t->cyc8b) {
3076 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3077 t->rec8b = t->cyc8b - t->act8b;
3078 }
3079
3080 if (t->active + t->recover < t->cycle) {
3081 t->active += (t->cycle - (t->active + t->recover)) / 2;
3082 t->recover = t->cycle - t->active;
3083 }
3084
3085 /* In a few cases quantisation may produce enough errors to
3086 leave t->cycle too low for the sum of active and recovery;
3087 if so, we must correct this */
3088 if (t->active + t->recover > t->cycle)
3089 t->cycle = t->active + t->recover;
3090
3091 return 0;
3092 }
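/*
 * Illustrative example (not part of the original source): suppose
 * quantization leaves act8b == 3, rec8b == 3 and cyc8b == 8 clocks.
 * The lengthening step above adds (8 - 6) / 2 == 1 to act8b (now 4) and
 * sets rec8b to 8 - 4 == 4, so the 8-bit cycle adds up again.
 */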
3093
3094 /**
3095 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3096 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3097 * @cycle: cycle duration in ns
3098 *
3099 * Return matching xfer mode for @cycle. The returned mode is of
3100 * the transfer type specified by @xfer_shift. If @cycle is too
3101 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3102 * than the fastest known mode, the fastest mode is returned.
3103 *
3104 * LOCKING:
3105 * None.
3106 *
3107 * RETURNS:
3108 * Matching xfer_mode, 0xff if no match found.
3109 */
3110 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3111 {
3112 u8 base_mode = 0xff, last_mode = 0xff;
3113 const struct ata_xfer_ent *ent;
3114 const struct ata_timing *t;
3115
3116 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3117 if (ent->shift == xfer_shift)
3118 base_mode = ent->base;
3119
3120 for (t = ata_timing_find_mode(base_mode);
3121 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3122 unsigned short this_cycle;
3123
3124 switch (xfer_shift) {
3125 case ATA_SHIFT_PIO:
3126 case ATA_SHIFT_MWDMA:
3127 this_cycle = t->cycle;
3128 break;
3129 case ATA_SHIFT_UDMA:
3130 this_cycle = t->udma;
3131 break;
3132 default:
3133 return 0xff;
3134 }
3135
3136 if (cycle > this_cycle)
3137 break;
3138
3139 last_mode = t->mode;
3140 }
3141
3142 return last_mode;
3143 }
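/*
 * Illustrative example (not part of the original source): using the
 * UDMA rows of ata_timing[] above, ata_timing_cycle2mode(ATA_SHIFT_UDMA, 30)
 * accepts UDMA0..UDMA4 (udma cycles 120..30 ns, none shorter than the
 * requested 30 ns), breaks at UDMA5 (20 ns) and returns XFER_UDMA_4.
 * A request longer than 120 ns breaks on the first row and returns the
 * initial 0xff.
 */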
3144
3145 /**
3146 * ata_down_xfermask_limit - adjust dev xfer masks downward
3147 * @dev: Device to adjust xfer masks
3148 * @sel: ATA_DNXFER_* selector
3149 *
3150 * Adjust xfer masks of @dev downward. Note that this function
3151 * does not apply the change. Invoking ata_set_mode() afterwards
3152 * will apply the limit.
3153 *
3154 * LOCKING:
3155 * Inherited from caller.
3156 *
3157 * RETURNS:
3158 * 0 on success, negative errno on failure
3159 */
3160 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3161 {
3162 char buf[32];
3163 unsigned long orig_mask, xfer_mask;
3164 unsigned long pio_mask, mwdma_mask, udma_mask;
3165 int quiet, highbit;
3166
3167 quiet = !!(sel & ATA_DNXFER_QUIET);
3168 sel &= ~ATA_DNXFER_QUIET;
3169
3170 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3171 dev->mwdma_mask,
3172 dev->udma_mask);
3173 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3174
3175 switch (sel) {
3176 case ATA_DNXFER_PIO:
3177 highbit = fls(pio_mask) - 1;
3178 pio_mask &= ~(1 << highbit);
3179 break;
3180
3181 case ATA_DNXFER_DMA:
3182 if (udma_mask) {
3183 highbit = fls(udma_mask) - 1;
3184 udma_mask &= ~(1 << highbit);
3185 if (!udma_mask)
3186 return -ENOENT;
3187 } else if (mwdma_mask) {
3188 highbit = fls(mwdma_mask) - 1;
3189 mwdma_mask &= ~(1 << highbit);
3190 if (!mwdma_mask)
3191 return -ENOENT;
3192 }
3193 break;
3194
3195 case ATA_DNXFER_40C:
3196 udma_mask &= ATA_UDMA_MASK_40C;
3197 break;
3198
3199 case ATA_DNXFER_FORCE_PIO0:
3200 pio_mask &= 1; /* fall through */
3201 case ATA_DNXFER_FORCE_PIO:
3202 mwdma_mask = 0;
3203 udma_mask = 0;
3204 break;
3205
3206 default:
3207 BUG();
3208 }
3209
3210 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3211
3212 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3213 return -ENOENT;
3214
3215 if (!quiet) {
3216 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3217 snprintf(buf, sizeof(buf), "%s:%s",
3218 ata_mode_string(xfer_mask),
3219 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3220 else
3221 snprintf(buf, sizeof(buf), "%s",
3222 ata_mode_string(xfer_mask));
3223
3224 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3225 }
3226
3227 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3228 &dev->udma_mask);
3229
3230 return 0;
3231 }
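/*
 * Illustrative example (not part of the original source): for a device
 * with pio_mask 0x1f (PIO0-4), ATA_DNXFER_PIO clears the top bit and
 * leaves 0x0f (PIO0-3); the new limit only takes effect once
 * ata_set_mode() runs again.  ATA_DNXFER_FORCE_PIO0 instead reduces the
 * PIO mask to PIO0 alone and zeroes the MWDMA and UDMA masks.
 */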
3232
3233 static int ata_dev_set_mode(struct ata_device *dev)
3234 {
3235 struct ata_port *ap = dev->link->ap;
3236 struct ata_eh_context *ehc = &dev->link->eh_context;
3237 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3238 const char *dev_err_whine = "";
3239 int ign_dev_err = 0;
3240 unsigned int err_mask = 0;
3241 int rc;
3242
3243 dev->flags &= ~ATA_DFLAG_PIO;
3244 if (dev->xfer_shift == ATA_SHIFT_PIO)
3245 dev->flags |= ATA_DFLAG_PIO;
3246
3247 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3248 dev_err_whine = " (SET_XFERMODE skipped)";
3249 else {
3250 if (nosetxfer)
3251 ata_dev_warn(dev,
3252 "NOSETXFER but PATA detected - can't "
3253 "skip SETXFER, might malfunction\n");
3254 err_mask = ata_dev_set_xfermode(dev);
3255 }
3256
3257 if (err_mask & ~AC_ERR_DEV)
3258 goto fail;
3259
3260 /* revalidate */
3261 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3262 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3263 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3264 if (rc)
3265 return rc;
3266
3267 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3268 /* Old CFA may refuse this command, which is just fine */
3269 if (ata_id_is_cfa(dev->id))
3270 ign_dev_err = 1;
3271 /* Catch several broken garbage emulations plus some pre
3272 ATA devices */
3273 if (ata_id_major_version(dev->id) == 0 &&
3274 dev->pio_mode <= XFER_PIO_2)
3275 ign_dev_err = 1;
3276 /* Some very old devices and some bad newer ones fail
3277 any kind of SET_XFERMODE request but support PIO0-2
3278 timings and no IORDY */
3279 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3280 ign_dev_err = 1;
3281 }
3282 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3283 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3284 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3285 dev->dma_mode == XFER_MW_DMA_0 &&
3286 (dev->id[63] >> 8) & 1)
3287 ign_dev_err = 1;
3288
3289 /* if the device is actually configured correctly, ignore dev err */
3290 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3291 ign_dev_err = 1;
3292
3293 if (err_mask & AC_ERR_DEV) {
3294 if (!ign_dev_err)
3295 goto fail;
3296 else
3297 dev_err_whine = " (device error ignored)";
3298 }
3299
3300 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3301 dev->xfer_shift, (int)dev->xfer_mode);
3302
3303 ata_dev_info(dev, "configured for %s%s\n",
3304 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3305 dev_err_whine);
3306
3307 return 0;
3308
3309 fail:
3310 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3311 return -EIO;
3312 }
3313
3314 /**
3315 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3316 * @link: link on which timings will be programmed
3317 * @r_failed_dev: out parameter for failed device
3318 *
3319 * Standard implementation of the function used to tune and set
3320 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3321 * ata_dev_set_mode() fails, pointer to the failing device is
3322 * returned in @r_failed_dev.
3323 *
3324 * LOCKING:
3325 * PCI/etc. bus probe sem.
3326 *
3327 * RETURNS:
3328 * 0 on success, negative errno otherwise
3329 */
3330
3331 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3332 {
3333 struct ata_port *ap = link->ap;
3334 struct ata_device *dev;
3335 int rc = 0, used_dma = 0, found = 0;
3336
3337 /* step 1: calculate xfer_mask */
3338 ata_for_each_dev(dev, link, ENABLED) {
3339 unsigned long pio_mask, dma_mask;
3340 unsigned int mode_mask;
3341
3342 mode_mask = ATA_DMA_MASK_ATA;
3343 if (dev->class == ATA_DEV_ATAPI)
3344 mode_mask = ATA_DMA_MASK_ATAPI;
3345 else if (ata_id_is_cfa(dev->id))
3346 mode_mask = ATA_DMA_MASK_CFA;
3347
3348 ata_dev_xfermask(dev);
3349 ata_force_xfermask(dev);
3350
3351 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3352
3353 if (libata_dma_mask & mode_mask)
3354 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3355 dev->udma_mask);
3356 else
3357 dma_mask = 0;
3358
3359 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3360 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3361
3362 found = 1;
3363 if (ata_dma_enabled(dev))
3364 used_dma = 1;
3365 }
3366 if (!found)
3367 goto out;
3368
3369 /* step 2: always set host PIO timings */
3370 ata_for_each_dev(dev, link, ENABLED) {
3371 if (dev->pio_mode == 0xff) {
3372 ata_dev_warn(dev, "no PIO support\n");
3373 rc = -EINVAL;
3374 goto out;
3375 }
3376
3377 dev->xfer_mode = dev->pio_mode;
3378 dev->xfer_shift = ATA_SHIFT_PIO;
3379 if (ap->ops->set_piomode)
3380 ap->ops->set_piomode(ap, dev);
3381 }
3382
3383 /* step 3: set host DMA timings */
3384 ata_for_each_dev(dev, link, ENABLED) {
3385 if (!ata_dma_enabled(dev))
3386 continue;
3387
3388 dev->xfer_mode = dev->dma_mode;
3389 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3390 if (ap->ops->set_dmamode)
3391 ap->ops->set_dmamode(ap, dev);
3392 }
3393
3394 /* step 4: update devices' xfer mode */
3395 ata_for_each_dev(dev, link, ENABLED) {
3396 rc = ata_dev_set_mode(dev);
3397 if (rc)
3398 goto out;
3399 }
3400
3401 /* Record simplex status. If we selected DMA then the other
3402 * host channels are not permitted to do so.
3403 */
3404 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3405 ap->host->simplex_claimed = ap;
3406
3407 out:
3408 if (rc)
3409 *r_failed_dev = dev;
3410 return rc;
3411 }
3412
3413 /**
3414 * ata_wait_ready - wait for link to become ready
3415 * @link: link to be waited on
3416 * @deadline: deadline jiffies for the operation
3417 * @check_ready: callback to check link readiness
3418 *
3419 * Wait for @link to become ready. @check_ready should return
3420 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3421 * link doesn't seem to be occupied, other errno for other error
3422 * conditions.
3423 *
3424 * Transient -ENODEV conditions are allowed for
3425 * ATA_TMOUT_FF_WAIT.
3426 *
3427 * LOCKING:
3428 * EH context.
3429 *
3430 * RETURNS:
3431 * 0 if @link is ready before @deadline; otherwise, -errno.
3432 */
3433 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3434 int (*check_ready)(struct ata_link *link))
3435 {
3436 unsigned long start = jiffies;
3437 unsigned long nodev_deadline;
3438 int warned = 0;
3439
3440 /* choose which 0xff timeout to use, read comment in libata.h */
3441 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3442 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3443 else
3444 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3445
3446 /* Slave readiness can't be tested separately from master. On
3447 * M/S emulation configuration, this function should be called
3448 * only on the master and it will handle both master and slave.
3449 */
3450 WARN_ON(link == link->ap->slave_link);
3451
3452 if (time_after(nodev_deadline, deadline))
3453 nodev_deadline = deadline;
3454
3455 while (1) {
3456 unsigned long now = jiffies;
3457 int ready, tmp;
3458
3459 ready = tmp = check_ready(link);
3460 if (ready > 0)
3461 return 0;
3462
3463 /*
3464 * -ENODEV could be transient. Ignore -ENODEV if link
3465 * is online. Also, some SATA devices take a long
3466 * time to clear 0xff after reset. Wait for
3467 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3468 * offline.
3469 *
3470 * Note that some PATA controllers (pata_ali) explode
3471 * if status register is read more than once when
3472 * there's no device attached.
3473 */
3474 if (ready == -ENODEV) {
3475 if (ata_link_online(link))
3476 ready = 0;
3477 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3478 !ata_link_offline(link) &&
3479 time_before(now, nodev_deadline))
3480 ready = 0;
3481 }
3482
3483 if (ready)
3484 return ready;
3485 if (time_after(now, deadline))
3486 return -EBUSY;
3487
3488 if (!warned && time_after(now, start + 5 * HZ) &&
3489 (deadline - now > 3 * HZ)) {
3490 ata_link_warn(link,
3491 "link is slow to respond, please be patient "
3492 "(ready=%d)\n", tmp);
3493 warned = 1;
3494 }
3495
3496 ata_msleep(link->ap, 50);
3497 }
3498 }
3499
3500 /**
3501 * ata_wait_after_reset - wait for link to become ready after reset
3502 * @link: link to be waited on
3503 * @deadline: deadline jiffies for the operation
3504 * @check_ready: callback to check link readiness
3505 *
3506 * Wait for @link to become ready after reset.
3507 *
3508 * LOCKING:
3509 * EH context.
3510 *
3511 * RETURNS:
3512 * 0 if @link is ready before @deadline; otherwise, -errno.
3513 */
3514 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3515 int (*check_ready)(struct ata_link *link))
3516 {
3517 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3518
3519 return ata_wait_ready(link, deadline, check_ready);
3520 }
3521
3522 /**
3523 * sata_link_debounce - debounce SATA phy status
3524 * @link: ATA link to debounce SATA phy status for
3525 * @params: timing parameters { interval, duration, timeout } in msec
3526 * @deadline: deadline jiffies for the operation
3527 *
3528 * Make sure SStatus of @link reaches stable state, determined by
3529 * holding the same value where DET is not 1 for @duration polled
3530 * every @interval, before @timeout. Timeout constrains the
3531 * beginning of the stable state. Because DET gets stuck at 1 on
3532 * some controllers after hot unplugging, this function waits
3533 * until timeout and then returns 0 if DET is stable at 1.
3534 *
3535 * @timeout is further limited by @deadline. The sooner of the
3536 * two is used.
3537 *
3538 * LOCKING:
3539 * Kernel thread context (may sleep)
3540 *
3541 * RETURNS:
3542 * 0 on success, -errno on failure.
3543 */
3544 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3545 unsigned long deadline)
3546 {
3547 unsigned long interval = params[0];
3548 unsigned long duration = params[1];
3549 unsigned long last_jiffies, t;
3550 u32 last, cur;
3551 int rc;
3552
3553 t = ata_deadline(jiffies, params[2]);
3554 if (time_before(t, deadline))
3555 deadline = t;
3556
3557 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3558 return rc;
3559 cur &= 0xf;
3560
3561 last = cur;
3562 last_jiffies = jiffies;
3563
3564 while (1) {
3565 ata_msleep(link->ap, interval);
3566 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3567 return rc;
3568 cur &= 0xf;
3569
3570 /* DET stable? */
3571 if (cur == last) {
3572 if (cur == 1 && time_before(jiffies, deadline))
3573 continue;
3574 if (time_after(jiffies,
3575 ata_deadline(last_jiffies, duration)))
3576 return 0;
3577 continue;
3578 }
3579
3580 /* unstable, start over */
3581 last = cur;
3582 last_jiffies = jiffies;
3583
3584 /* Check deadline. If debouncing failed, return
3585 * -EPIPE to tell upper layer to lower link speed.
3586 */
3587 if (time_after(jiffies, deadline))
3588 return -EPIPE;
3589 }
3590 }
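/*
 * Illustrative example (not part of the original source): with a
 * hypothetical params triple of { 25, 500, 2000 }, SStatus is sampled
 * every 25 ms and the function returns 0 as soon as DET has held the
 * same value other than 1 for 500 ms.  A DET stuck at 1 is tolerated
 * until the 2000 ms timeout (or @deadline, whichever is sooner) and
 * then also returns 0, while a value still bouncing when the deadline
 * passes makes the function return -EPIPE so the caller can drop the
 * link speed.
 */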
3591
3592 /**
3593 * sata_link_resume - resume SATA link
3594 * @link: ATA link to resume SATA
3595 * @params: timing parameters { interval, duration, timeout } in msec
3596 * @deadline: deadline jiffies for the operation
3597 *
3598 * Resume SATA phy @link and debounce it.
3599 *
3600 * LOCKING:
3601 * Kernel thread context (may sleep)
3602 *
3603 * RETURNS:
3604 * 0 on success, -errno on failure.
3605 */
3606 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3607 unsigned long deadline)
3608 {
3609 int tries = ATA_LINK_RESUME_TRIES;
3610 u32 scontrol, serror;
3611 int rc;
3612
3613 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3614 return rc;
3615
3616 /*
3617 * Writes to SControl sometimes get ignored under certain
3618 * controllers (ata_piix SIDPR). Make sure DET actually is
3619 * cleared.
3620 */
3621 do {
3622 scontrol = (scontrol & 0x0f0) | 0x300;
3623 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3624 return rc;
3625 /*
3626 * Some PHYs react badly if SStatus is pounded
3627 * immediately after resuming. Delay 200ms before
3628 * debouncing.
3629 */
3630 ata_msleep(link->ap, 200);
3631
3632 /* is SControl restored correctly? */
3633 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3634 return rc;
3635 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3636
3637 if ((scontrol & 0xf0f) != 0x300) {
3638 ata_link_warn(link, "failed to resume link (SControl %X)\n",
3639 scontrol);
3640 return 0;
3641 }
3642
3643 if (tries < ATA_LINK_RESUME_TRIES)
3644 ata_link_warn(link, "link resume succeeded after %d retries\n",
3645 ATA_LINK_RESUME_TRIES - tries);
3646
3647 if ((rc = sata_link_debounce(link, params, deadline)))
3648 return rc;
3649
3650 /* clear SError, some PHYs require this even for SRST to work */
3651 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3652 rc = sata_scr_write(link, SCR_ERROR, serror);
3653
3654 return rc != -EINVAL ? rc : 0;
3655 }
3656
3657 /**
3658 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3659 * @link: ATA link to manipulate SControl for
3660 * @policy: LPM policy to configure
3661 * @spm_wakeup: initiate LPM transition to active state
3662 *
3663 * Manipulate the IPM field of the SControl register of @link
3664 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3665 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3666 * the link. This function also clears PHYRDY_CHG before
3667 * returning.
3668 *
3669 * LOCKING:
3670 * EH context.
3671 *
3672 * RETURNS:
3673 * 0 on success, -errno otherwise.
3674 */
3675 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3676 bool spm_wakeup)
3677 {
3678 struct ata_eh_context *ehc = &link->eh_context;
3679 bool woken_up = false;
3680 u32 scontrol;
3681 int rc;
3682
3683 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3684 if (rc)
3685 return rc;
3686
3687 switch (policy) {
3688 case ATA_LPM_MAX_POWER:
3689 /* disable all LPM transitions */
3690 scontrol |= (0x7 << 8);
3691 /* initiate transition to active state */
3692 if (spm_wakeup) {
3693 scontrol |= (0x4 << 12);
3694 woken_up = true;
3695 }
3696 break;
3697 case ATA_LPM_MED_POWER:
3698 /* allow LPM to PARTIAL */
3699 scontrol &= ~(0x1 << 8);
3700 scontrol |= (0x6 << 8);
3701 break;
3702 case ATA_LPM_MIN_POWER:
3703 if (ata_link_nr_enabled(link) > 0)
3704 /* no restrictions on LPM transitions */
3705 scontrol &= ~(0x7 << 8);
3706 else {
3707 /* empty port, power off */
3708 scontrol &= ~0xf;
3709 scontrol |= (0x1 << 2);
3710 }
3711 break;
3712 default:
3713 WARN_ON(1);
3714 }
3715
3716 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
3717 if (rc)
3718 return rc;
3719
3720 /* give the link time to transit out of LPM state */
3721 if (woken_up)
3722 msleep(10);
3723
3724 /* clear PHYRDY_CHG from SError */
3725 ehc->i.serror &= ~SERR_PHYRDY_CHG;
3726 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
3727 }
3728
3729 /**
3730 * ata_std_prereset - prepare for reset
3731 * @link: ATA link to be reset
3732 * @deadline: deadline jiffies for the operation
3733 *
3734 * @link is about to be reset. Initialize it. Failure from
3735 * prereset makes libata abort whole reset sequence and give up
3736 * that port, so prereset should be best-effort. It does its
3737 * best to prepare for reset sequence but if things go wrong, it
3738 * should just whine, not fail.
3739 *
3740 * LOCKING:
3741 * Kernel thread context (may sleep)
3742 *
3743 * RETURNS:
3744 * 0 on success, -errno otherwise.
3745 */
3746 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3747 {
3748 struct ata_port *ap = link->ap;
3749 struct ata_eh_context *ehc = &link->eh_context;
3750 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3751 int rc;
3752
3753 /* if we're about to do hardreset, nothing more to do */
3754 if (ehc->i.action & ATA_EH_HARDRESET)
3755 return 0;
3756
3757 /* if SATA, resume link */
3758 if (ap->flags & ATA_FLAG_SATA) {
3759 rc = sata_link_resume(link, timing, deadline);
3760 /* whine about phy resume failure but proceed */
3761 if (rc && rc != -EOPNOTSUPP)
3762 ata_link_warn(link,
3763 "failed to resume link for reset (errno=%d)\n",
3764 rc);
3765 }
3766
3767 /* no point in trying softreset on offline link */
3768 if (ata_phys_link_offline(link))
3769 ehc->i.action &= ~ATA_EH_SOFTRESET;
3770
3771 return 0;
3772 }
3773
3774 /**
3775 * sata_link_hardreset - reset link via SATA phy reset
3776 * @link: link to reset
3777 * @timing: timing parameters { interval, duration, timeout } in msec
3778 * @deadline: deadline jiffies for the operation
3779 * @online: optional out parameter indicating link onlineness
3780 * @check_ready: optional callback to check link readiness
3781 *
3782 * SATA phy-reset @link using DET bits of SControl register.
3783 * After hardreset, link readiness is waited upon using
3784 * ata_wait_ready() if @check_ready is specified. LLDs are
3785 * allowed to not specify @check_ready and wait themselves after this
3786 * function returns. Device classification is LLD's
3787 * responsibility.
3788 *
3789 * *@online is set to one iff reset succeeded and @link is online
3790 * after reset.
3791 *
3792 * LOCKING:
3793 * Kernel thread context (may sleep)
3794 *
3795 * RETURNS:
3796 * 0 on success, -errno otherwise.
3797 */
3798 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3799 unsigned long deadline,
3800 bool *online, int (*check_ready)(struct ata_link *))
3801 {
3802 u32 scontrol;
3803 int rc;
3804
3805 DPRINTK("ENTER\n");
3806
3807 if (online)
3808 *online = false;
3809
3810 if (sata_set_spd_needed(link)) {
3811 /* SATA spec says nothing about how to reconfigure
3812 * spd. To be on the safe side, turn off phy during
3813 * reconfiguration. This works for at least ICH7 AHCI
3814 * and Sil3124.
3815 */
3816 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3817 goto out;
3818
3819 scontrol = (scontrol & 0x0f0) | 0x304;
3820
3821 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3822 goto out;
3823
3824 sata_set_spd(link);
3825 }
3826
3827 /* issue phy wake/reset */
3828 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3829 goto out;
3830
3831 scontrol = (scontrol & 0x0f0) | 0x301;
3832
3833 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3834 goto out;
3835
3836 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3837 * 10.4.2 says at least 1 ms.
3838 */
3839 ata_msleep(link->ap, 1);
3840
3841 /* bring link back */
3842 rc = sata_link_resume(link, timing, deadline);
3843 if (rc)
3844 goto out;
3845 /* if link is offline nothing more to do */
3846 if (ata_phys_link_offline(link))
3847 goto out;
3848
3849 /* Link is online. From this point, -ENODEV too is an error. */
3850 if (online)
3851 *online = true;
3852
3853 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3854 /* If PMP is supported, we have to do follow-up SRST.
3855 * Some PMPs don't send D2H Reg FIS after hardreset if
3856 * the first port is empty. Wait only for
3857 * ATA_TMOUT_PMP_SRST_WAIT.
3858 */
3859 if (check_ready) {
3860 unsigned long pmp_deadline;
3861
3862 pmp_deadline = ata_deadline(jiffies,
3863 ATA_TMOUT_PMP_SRST_WAIT);
3864 if (time_after(pmp_deadline, deadline))
3865 pmp_deadline = deadline;
3866 ata_wait_ready(link, pmp_deadline, check_ready);
3867 }
3868 rc = -EAGAIN;
3869 goto out;
3870 }
3871
3872 rc = 0;
3873 if (check_ready)
3874 rc = ata_wait_ready(link, deadline, check_ready);
3875 out:
3876 if (rc && rc != -EAGAIN) {
3877 /* online is set iff link is online && reset succeeded */
3878 if (online)
3879 *online = false;
3880 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
3881 }
3882 DPRINTK("EXIT, rc=%d\n", rc);
3883 return rc;
3884 }
3885
3886 /**
3887 * sata_std_hardreset - COMRESET w/o waiting or classification
3888 * @link: link to reset
3889 * @class: resulting class of attached device
3890 * @deadline: deadline jiffies for the operation
3891 *
3892 * Standard SATA COMRESET w/o waiting or classification.
3893 *
3894 * LOCKING:
3895 * Kernel thread context (may sleep)
3896 *
3897 * RETURNS:
3898 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3899 */
3900 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3901 unsigned long deadline)
3902 {
3903 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3904 bool online;
3905 int rc;
3906
3907 /* do hardreset */
3908 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3909 return online ? -EAGAIN : rc;
3910 }
3911
3912 /**
3913 * ata_std_postreset - standard postreset callback
3914 * @link: the target ata_link
3915 * @classes: classes of attached devices
3916 *
3917 * This function is invoked after a successful reset. Note that
3918 * the device might have been reset more than once using
3919 * different reset methods before postreset is invoked.
3920 *
3921 * LOCKING:
3922 * Kernel thread context (may sleep)
3923 */
3924 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3925 {
3926 u32 serror;
3927
3928 DPRINTK("ENTER\n");
3929
3930 /* reset complete, clear SError */
3931 if (!sata_scr_read(link, SCR_ERROR, &serror))
3932 sata_scr_write(link, SCR_ERROR, serror);
3933
3934 /* print link status */
3935 sata_print_link_status(link);
3936
3937 DPRINTK("EXIT\n");
3938 }
3939
3940 /**
3941 * ata_dev_same_device - Determine whether new ID matches configured device
3942 * @dev: device to compare against
3943 * @new_class: class of the new device
3944 * @new_id: IDENTIFY page of the new device
3945 *
3946 * Compare @new_class and @new_id against @dev and determine
3947 * whether @dev is the device indicated by @new_class and
3948 * @new_id.
3949 *
3950 * LOCKING:
3951 * None.
3952 *
3953 * RETURNS:
3954 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3955 */
3956 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3957 const u16 *new_id)
3958 {
3959 const u16 *old_id = dev->id;
3960 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3961 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3962
3963 if (dev->class != new_class) {
3964 ata_dev_info(dev, "class mismatch %d != %d\n",
3965 dev->class, new_class);
3966 return 0;
3967 }
3968
3969 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3970 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3971 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3972 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3973
3974 if (strcmp(model[0], model[1])) {
3975 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3976 model[0], model[1]);
3977 return 0;
3978 }
3979
3980 if (strcmp(serial[0], serial[1])) {
3981 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3982 serial[0], serial[1]);
3983 return 0;
3984 }
3985
3986 return 1;
3987 }
3988
3989 /**
3990 * ata_dev_reread_id - Re-read IDENTIFY data
3991 * @dev: target ATA device
3992 * @readid_flags: read ID flags
3993 *
3994 * Re-read IDENTIFY page and make sure @dev is still attached to
3995 * the port.
3996 *
3997 * LOCKING:
3998 * Kernel thread context (may sleep)
3999 *
4000 * RETURNS:
4001 * 0 on success, negative errno otherwise
4002 */
4003 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4004 {
4005 unsigned int class = dev->class;
4006 u16 *id = (void *)dev->link->ap->sector_buf;
4007 int rc;
4008
4009 /* read ID data */
4010 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4011 if (rc)
4012 return rc;
4013
4014 /* is the device still there? */
4015 if (!ata_dev_same_device(dev, class, id))
4016 return -ENODEV;
4017
4018 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4019 return 0;
4020 }
4021
4022 /**
4023 * ata_dev_revalidate - Revalidate ATA device
4024 * @dev: device to revalidate
4025 * @new_class: new class code
4026 * @readid_flags: read ID flags
4027 *
4028 * Re-read IDENTIFY page, make sure @dev is still attached to the
4029 * port and reconfigure it according to the new IDENTIFY page.
4030 *
4031 * LOCKING:
4032 * Kernel thread context (may sleep)
4033 *
4034 * RETURNS:
4035 * 0 on success, negative errno otherwise
4036 */
4037 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4038 unsigned int readid_flags)
4039 {
4040 u64 n_sectors = dev->n_sectors;
4041 u64 n_native_sectors = dev->n_native_sectors;
4042 int rc;
4043
4044 if (!ata_dev_enabled(dev))
4045 return -ENODEV;
4046
4047 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4048 if (ata_class_enabled(new_class) &&
4049 new_class != ATA_DEV_ATA &&
4050 new_class != ATA_DEV_ATAPI &&
4051 new_class != ATA_DEV_ZAC &&
4052 new_class != ATA_DEV_SEMB) {
4053 ata_dev_info(dev, "class mismatch %u != %u\n",
4054 dev->class, new_class);
4055 rc = -ENODEV;
4056 goto fail;
4057 }
4058
4059 /* re-read ID */
4060 rc = ata_dev_reread_id(dev, readid_flags);
4061 if (rc)
4062 goto fail;
4063
4064 /* configure device according to the new ID */
4065 rc = ata_dev_configure(dev);
4066 if (rc)
4067 goto fail;
4068
4069 /* verify n_sectors hasn't changed */
4070 if (dev->class != ATA_DEV_ATA || !n_sectors ||
4071 dev->n_sectors == n_sectors)
4072 return 0;
4073
4074 /* n_sectors has changed */
4075 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4076 (unsigned long long)n_sectors,
4077 (unsigned long long)dev->n_sectors);
4078
4079 /*
4080 * Something could have caused HPA to be unlocked
4081 * involuntarily. If n_native_sectors hasn't changed and the
4082 * new size matches it, keep the device.
4083 */
4084 if (dev->n_native_sectors == n_native_sectors &&
4085 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4086 ata_dev_warn(dev,
4087 "new n_sectors matches native, probably "
4088 "late HPA unlock, n_sectors updated\n");
4089 /* use the larger n_sectors */
4090 return 0;
4091 }
4092
4093 /*
4094 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4095 * unlocking HPA in those cases.
4096 *
4097 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4098 */
4099 if (dev->n_native_sectors == n_native_sectors &&
4100 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4101 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4102 ata_dev_warn(dev,
4103 "old n_sectors matches native, probably "
4104 "late HPA lock, will try to unlock HPA\n");
4105 /* try unlocking HPA */
4106 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4107 rc = -EIO;
4108 } else
4109 rc = -ENODEV;
4110
4111 /* restore original n_[native_]sectors and fail */
4112 dev->n_native_sectors = n_native_sectors;
4113 dev->n_sectors = n_sectors;
4114 fail:
4115 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4116 return rc;
4117 }
4118
4119 struct ata_blacklist_entry {
4120 const char *model_num;
4121 const char *model_rev;
4122 unsigned long horkage;
4123 };
4124
4125 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4126 /* Devices with DMA related problems under Linux */
4127 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4128 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4129 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4130 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4131 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4132 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4133 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4134 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4135 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4136 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4137 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4138 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4139 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4140 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4141 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4142 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4143 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4144 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4145 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4146 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4147 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4148 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4149 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4150 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4151 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4152 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4153 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4154 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4155 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4156 /* Odd clown on sil3726/4726 PMPs */
4157 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4158
4159 /* Weird ATAPI devices */
4160 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4161 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4162 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4163 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4164
4165 /* Devices we expect to fail diagnostics */
4166
4167 /* Devices where NCQ should be avoided */
4168 /* NCQ is slow */
4169 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4170 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4171 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4172 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4173 /* NCQ is broken */
4174 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4175 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4176 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4177 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4178 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4179
4180 /* Seagate NCQ + FLUSH CACHE firmware bug */
4181 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4182 ATA_HORKAGE_FIRMWARE_WARN },
4183
4184 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4185 ATA_HORKAGE_FIRMWARE_WARN },
4186
4187 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4188 ATA_HORKAGE_FIRMWARE_WARN },
4189
4190 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4191 ATA_HORKAGE_FIRMWARE_WARN },
4192
4193 /* Seagate Momentus SpinPoint M8 seems to have FPDMA_AA issues */
4194 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4195 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4196
4197 /* Blacklist entries taken from Silicon Image 3124/3132
4198 Windows driver .inf file - also several Linux problem reports */
4199 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4200 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4201 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4202
4203 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4204 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4205
4206 /* devices which puke on READ_NATIVE_MAX */
4207 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4208 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4209 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4210 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4211
4212 /* this one allows HPA unlocking but fails IOs on the area */
4213 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4214
4215 /* Devices which report 1 sector over size HPA */
4216 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4217 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4218 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4219
4220 /* Devices which get the IVB wrong */
4221 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4222 /* Maybe we should just blacklist TSSTcorp... */
4223 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4224
4225 /* Devices that do not need bridging limits applied */
4226 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4227 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4228
4229 /* Devices which aren't very happy with higher link speeds */
4230 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4231 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
4232
4233 /*
4234 * Devices which choke on SETXFER. Applies only if both the
4235 * device and controller are SATA.
4236 */
4237 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4238 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4239 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
4240 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4241 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4242
4243 /* devices that don't properly handle queued TRIM commands */
4244 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4245 { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4246 { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4247 { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4248
4249 /*
4250 * Some WD SATA-I drives spin up and down erratically when the link
4251 * is put into the slumber mode. We don't have a full list of the
4252 * affected devices. Disable LPM if the device matches one of the
4253 * known prefixes and is SATA-1. As a side effect LPM partial is
4254 * lost too.
4255 *
4256 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4257 */
4258 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4259 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4260 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4261 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4262 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4263 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4264 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4265
4266 /* End Marker */
4267 { }
4268 };
4269
4270 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4271 {
4272 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4273 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4274 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4275
4276 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4277 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4278
4279 while (ad->model_num) {
4280 if (glob_match(ad->model_num, model_num)) {
4281 if (ad->model_rev == NULL)
4282 return ad->horkage;
4283 if (glob_match(ad->model_rev, model_rev))
4284 return ad->horkage;
4285 }
4286 ad++;
4287 }
4288 return 0;
4289 }
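/*
 * The model_num/model_rev strings above are shell-style globs matched by
 * glob_match() from <linux/glob.h>.  A few illustrative (hypothetical)
 * matches against table entries:
 *
 *	glob_match("Crucial_CT???M500SSD*", "Crucial_CT240M500SSD1")	-> true
 *	glob_match("ST3640[36]23AS", "ST3640623AS")			-> true
 *	glob_match("ST3640[36]23AS", "ST3640523AS")			-> false
 */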
4290
4291 static int ata_dma_blacklisted(const struct ata_device *dev)
4292 {
4293 /* We don't support polling DMA.
4294 * Blacklist DMA (and fall back to PIO) for ATAPI devices with CDB-intr
4295 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4296 */
4297 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4298 (dev->flags & ATA_DFLAG_CDB_INTR))
4299 return 1;
4300 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4301 }
4302
4303 /**
4304 * ata_is_40wire - check drive side detection
4305 * @dev: device
4306 *
4307 * Perform drive side detection decoding, allowing for device vendors
4308 * who can't follow the documentation.
4309 */
4310
4311 static int ata_is_40wire(struct ata_device *dev)
4312 {
4313 if (dev->horkage & ATA_HORKAGE_IVB)
4314 return ata_drive_40wire_relaxed(dev->id);
4315 return ata_drive_40wire(dev->id);
4316 }
4317
4318 /**
4319 * cable_is_40wire - 40/80/SATA decider
4320 * @ap: port to consider
4321 *
4322 * This function encapsulates the policy for speed management
4323 * in one place. At the moment we don't cache the result but
4324 * there is a good case for setting ap->cbl to the result when
4325 * we are called with unknown cables (and figuring out if it
4326 * impacts hotplug at all).
4327 *
4328 * Return 1 if the cable appears to be 40 wire.
4329 */
4330
4331 static int cable_is_40wire(struct ata_port *ap)
4332 {
4333 struct ata_link *link;
4334 struct ata_device *dev;
4335
4336 /* If the controller thinks we are 40 wire, we are. */
4337 if (ap->cbl == ATA_CBL_PATA40)
4338 return 1;
4339
4340 /* If the controller thinks we are 80 wire, we are. */
4341 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4342 return 0;
4343
4344 /* If the system is known to use a short 40 wire cable (e.g.
4345 * a laptop), then we allow 80 wire modes even if the drive
4346 * isn't sure.
4347 */
4348 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4349 return 0;
4350
4351 /* If the controller doesn't know, we scan.
4352 *
4353 * Note: We look for all 40 wire detects at this point. Any
4354 * 80 wire detect is taken to mean an 80 wire cable because
4355 * - in many setups only one drive (the slave, if present) will
4356 * give a valid detect
4357 * - a drive that can't report a detect shouldn't be allowed
4358 * to colour the choice
4359 */
4360 ata_for_each_link(link, ap, EDGE) {
4361 ata_for_each_dev(dev, link, ENABLED) {
4362 if (!ata_is_40wire(dev))
4363 return 0;
4364 }
4365 }
4366 return 1;
4367 }
4368
4369 /**
4370 * ata_dev_xfermask - Compute supported xfermask of the given device
4371 * @dev: Device to compute xfermask for
4372 *
4373 * Compute supported xfermask of @dev and store it in
4374 * dev->*_mask. This function is responsible for applying all
4375 * known limits including host controller limits, device
4376 * blacklist, etc...
4377 *
4378 * LOCKING:
4379 * None.
4380 */
4381 static void ata_dev_xfermask(struct ata_device *dev)
4382 {
4383 struct ata_link *link = dev->link;
4384 struct ata_port *ap = link->ap;
4385 struct ata_host *host = ap->host;
4386 unsigned long xfer_mask;
4387
4388 /* controller modes available */
4389 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4390 ap->mwdma_mask, ap->udma_mask);
4391
4392 /* drive modes available */
4393 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4394 dev->mwdma_mask, dev->udma_mask);
4395 xfer_mask &= ata_id_xfermask(dev->id);
4396
4397 /*
4398 * CFA Advanced TrueIDE timings are not allowed on a shared
4399 * cable
4400 */
4401 if (ata_dev_pair(dev)) {
4402 /* No PIO5 or PIO6 */
4403 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4404 /* No MWDMA3 or MWDMA 4 */
4405 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4406 }
4407
4408 if (ata_dma_blacklisted(dev)) {
4409 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4410 ata_dev_warn(dev,
4411 "device is on DMA blacklist, disabling DMA\n");
4412 }
4413
4414 if ((host->flags & ATA_HOST_SIMPLEX) &&
4415 host->simplex_claimed && host->simplex_claimed != ap) {
4416 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4417 ata_dev_warn(dev,
4418 "simplex DMA is claimed by other device, disabling DMA\n");
4419 }
4420
4421 if (ap->flags & ATA_FLAG_NO_IORDY)
4422 xfer_mask &= ata_pio_mask_no_iordy(dev);
4423
4424 if (ap->ops->mode_filter)
4425 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4426
4427 /* Apply cable rule here. Don't apply it early because when
4428 * we handle hot plug the cable type can itself change.
4429 * Check this last so that we know if the transfer rate was
4430 * solely limited by the cable.
4431 * Unknown or 80 wire cables reported host side are checked
4432 * drive side as well. Cases where we know a 40wire cable
4433 * is used safely for 80 are not checked here.
4434 */
4435 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4436 /* UDMA/44 or higher would be available */
4437 if (cable_is_40wire(ap)) {
4438 ata_dev_warn(dev,
4439 "limited to UDMA/33 due to 40-wire cable\n");
4440 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4441 }
4442
4443 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4444 &dev->mwdma_mask, &dev->udma_mask);
4445 }
4446
4447 /**
4448 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4449 * @dev: Device to which command will be sent
4450 *
4451 * Issue SET FEATURES - XFER MODE command to device @dev
4452 * on port @ap.
4453 *
4454 * LOCKING:
4455 * PCI/etc. bus probe sem.
4456 *
4457 * RETURNS:
4458 * 0 on success, AC_ERR_* mask otherwise.
4459 */
4460
4461 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4462 {
4463 struct ata_taskfile tf;
4464 unsigned int err_mask;
4465
4466 /* set up set-features taskfile */
4467 DPRINTK("set features - xfer mode\n");
4468
4469 /* Some controllers and ATAPI devices show flaky interrupt
4470 * behavior after setting xfer mode. Use polling instead.
4471 */
4472 ata_tf_init(dev, &tf);
4473 tf.command = ATA_CMD_SET_FEATURES;
4474 tf.feature = SETFEATURES_XFER;
4475 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4476 tf.protocol = ATA_PROT_NODATA;
4477 /* If we are using IORDY we must send the mode setting command */
4478 if (ata_pio_need_iordy(dev))
4479 tf.nsect = dev->xfer_mode;
4480 /* If the device has IORDY and the controller does not - turn it off */
4481 else if (ata_id_has_iordy(dev->id))
4482 tf.nsect = 0x01;
4483 else /* In the ancient relic department - skip all of this */
4484 return 0;
4485
4486 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4487
4488 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4489 return err_mask;
4490 }
4491
4492 /**
4493 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4494 * @dev: Device to which command will be sent
4495 * @enable: Whether to enable or disable the feature
4496 * (SETFEATURES_SATA_ENABLE or SETFEATURES_SATA_DISABLE)
4497 * @feature: The SATA feature to set, passed in the sector count field
4498 *
4499 * Issue SET FEATURES - SATA FEATURES command to device @dev
4500 * on port @ap, with @feature in the sector count field.
4500 *
4501 * LOCKING:
4502 * PCI/etc. bus probe sem.
4503 *
4504 * RETURNS:
4505 * 0 on success, AC_ERR_* mask otherwise.
4506 */
4507 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4508 {
4509 struct ata_taskfile tf;
4510 unsigned int err_mask;
4511
4512 /* set up set-features taskfile */
4513 DPRINTK("set features - SATA features\n");
4514
4515 ata_tf_init(dev, &tf);
4516 tf.command = ATA_CMD_SET_FEATURES;
4517 tf.feature = enable;
4518 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4519 tf.protocol = ATA_PROT_NODATA;
4520 tf.nsect = feature;
4521
4522 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4523
4524 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4525 return err_mask;
4526 }
4527 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
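/*
 * Illustrative use only: this is how libata itself enables FPDMA
 * AutoActivate during device configuration.  A minimal sketch, assuming
 * @dev has already been identified:
 *
 *	unsigned int err_mask;
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_FPDMA_AA);
 *	if (err_mask)
 *		ata_dev_warn(dev, "failed to enable AA (err_mask=0x%x)\n",
 *			     err_mask);
 */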
4528
4529 /**
4530 * ata_dev_init_params - Issue INIT DEV PARAMS command
4531 * @dev: Device to which command will be sent
4532 * @heads: Number of heads (taskfile parameter)
4533 * @sectors: Number of sectors (taskfile parameter)
4534 *
4535 * LOCKING:
4536 * Kernel thread context (may sleep)
4537 *
4538 * RETURNS:
4539 * 0 on success, AC_ERR_* mask otherwise.
4540 */
4541 static unsigned int ata_dev_init_params(struct ata_device *dev,
4542 u16 heads, u16 sectors)
4543 {
4544 struct ata_taskfile tf;
4545 unsigned int err_mask;
4546
4547 /* Number of sectors per track 1-255. Number of heads 1-16 */
4548 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4549 return AC_ERR_INVALID;
4550
4551 /* set up init dev params taskfile */
4552 DPRINTK("init dev params \n");
4553
4554 ata_tf_init(dev, &tf);
4555 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4556 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4557 tf.protocol = ATA_PROT_NODATA;
4558 tf.nsect = sectors;
4559 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4560
4561 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4562 /* A clean abort indicates an original or just-out-of-spec drive;
4563 continue, since the setup is issued based on the geometry the
4564 drive reported as working */
4565 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4566 err_mask = 0;
4567
4568 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4569 return err_mask;
4570 }
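/*
 * For reference, the heads/sectors arguments normally come straight from
 * the IDENTIFY data (words 3 and 6); a brief sketch of the usual call:
 *
 *	err_mask = ata_dev_init_params(dev, id[3], id[6]);
 */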
4571
4572 /**
4573 * ata_sg_clean - Unmap DMA memory associated with command
4574 * @qc: Command containing DMA memory to be released
4575 *
4576 * Unmap all mapped DMA memory associated with this command.
4577 *
4578 * LOCKING:
4579 * spin_lock_irqsave(host lock)
4580 */
4581 void ata_sg_clean(struct ata_queued_cmd *qc)
4582 {
4583 struct ata_port *ap = qc->ap;
4584 struct scatterlist *sg = qc->sg;
4585 int dir = qc->dma_dir;
4586
4587 WARN_ON_ONCE(sg == NULL);
4588
4589 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4590
4591 if (qc->n_elem)
4592 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4593
4594 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4595 qc->sg = NULL;
4596 }
4597
4598 /**
4599 * atapi_check_dma - Check whether ATAPI DMA can be supported
4600 * @qc: Metadata associated with taskfile to check
4601 *
4602 * Allow low-level driver to filter ATA PACKET commands, returning
4603 * a status indicating whether or not it is OK to use DMA for the
4604 * supplied PACKET command.
4605 *
4606 * LOCKING:
4607 * spin_lock_irqsave(host lock)
4608 *
4609 * RETURNS: 0 when ATAPI DMA can be used
4610 * nonzero otherwise
4611 */
4612 int atapi_check_dma(struct ata_queued_cmd *qc)
4613 {
4614 struct ata_port *ap = qc->ap;
4615
4616 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4617 * few ATAPI devices choke on such DMA requests.
4618 */
4619 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4620 unlikely(qc->nbytes & 15))
4621 return 1;
4622
4623 if (ap->ops->check_atapi_dma)
4624 return ap->ops->check_atapi_dma(qc);
4625
4626 return 0;
4627 }
4628
4629 /**
4630 * ata_std_qc_defer - Check whether a qc needs to be deferred
4631 * @qc: ATA command in question
4632 *
4633 * Non-NCQ commands cannot run with any other command, NCQ or
4634 * not. As the upper layer only knows the queue depth, we are
4635 * responsible for maintaining exclusion. This function checks
4636 * whether a new command @qc can be issued.
4637 *
4638 * LOCKING:
4639 * spin_lock_irqsave(host lock)
4640 *
4641 * RETURNS:
4642 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4643 */
4644 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4645 {
4646 struct ata_link *link = qc->dev->link;
4647
4648 if (qc->tf.protocol == ATA_PROT_NCQ) {
4649 if (!ata_tag_valid(link->active_tag))
4650 return 0;
4651 } else {
4652 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4653 return 0;
4654 }
4655
4656 return ATA_DEFER_LINK;
4657 }
4658
4659 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4660
4661 /**
4662 * ata_sg_init - Associate command with scatter-gather table.
4663 * @qc: Command to be associated
4664 * @sg: Scatter-gather table.
4665 * @n_elem: Number of elements in s/g table.
4666 *
4667 * Initialize the data-related elements of queued_cmd @qc
4668 * to point to a scatter-gather table @sg, containing @n_elem
4669 * elements.
4670 *
4671 * LOCKING:
4672 * spin_lock_irqsave(host lock)
4673 */
4674 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4675 unsigned int n_elem)
4676 {
4677 qc->sg = sg;
4678 qc->n_elem = n_elem;
4679 qc->cursg = qc->sg;
4680 }
4681
4682 /**
4683 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4684 * @qc: Command with scatter-gather table to be mapped.
4685 *
4686 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4687 *
4688 * LOCKING:
4689 * spin_lock_irqsave(host lock)
4690 *
4691 * RETURNS:
4692 * Zero on success, negative on error.
4693 *
4694 */
4695 static int ata_sg_setup(struct ata_queued_cmd *qc)
4696 {
4697 struct ata_port *ap = qc->ap;
4698 unsigned int n_elem;
4699
4700 VPRINTK("ENTER, ata%u\n", ap->print_id);
4701
4702 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4703 if (n_elem < 1)
4704 return -1;
4705
4706 DPRINTK("%d sg elements mapped\n", n_elem);
4707 qc->orig_n_elem = qc->n_elem;
4708 qc->n_elem = n_elem;
4709 qc->flags |= ATA_QCFLAG_DMAMAP;
4710
4711 return 0;
4712 }
4713
4714 /**
4715 * swap_buf_le16 - swap halves of 16-bit words in place
4716 * @buf: Buffer to swap
4717 * @buf_words: Number of 16-bit words in buffer.
4718 *
4719 * Swap halves of 16-bit words if needed to convert from
4720 * little-endian byte order to native cpu byte order, or
4721 * vice-versa.
4722 *
4723 * LOCKING:
4724 * Inherited from caller.
4725 */
4726 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4727 {
4728 #ifdef __BIG_ENDIAN
4729 unsigned int i;
4730
4731 for (i = 0; i < buf_words; i++)
4732 buf[i] = le16_to_cpu(buf[i]);
4733 #endif /* __BIG_ENDIAN */
4734 }
4735
4736 /**
4737 * ata_qc_new - Request an available ATA command, for queueing
4738 * @ap: target port
4739 *
4740 * Some ATA host controllers may implement a queue depth which is less
4741 * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
4742 * the hardware limitation.
4743 *
4744 * LOCKING:
4745 * None.
4746 */
4747
4748 static struct ata_queued_cmd *sas_ata_qc_new(struct ata_port *ap)
4749 {
4750 struct ata_queued_cmd *qc = NULL;
4751 unsigned int max_queue = ap->host->n_tags;
4752 unsigned int i, tag;
4753
4754 for (i = 0, tag = ap->sas_last_tag + 1; i < max_queue; i++, tag++) {
4755 tag = tag < max_queue ? tag : 0;
4756
4757 /* the last tag is reserved for the internal command */
4758 if (tag == ATA_TAG_INTERNAL)
4759 continue;
4760
4761 if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) {
4762 qc = __ata_qc_from_tag(ap, tag);
4763 qc->tag = tag;
4764 ap->sas_last_tag = tag;
4765 break;
4766 }
4767 }
4768
4769 return qc;
4770 }
4771
4772 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap, int blktag)
4773 {
4774 struct ata_queued_cmd *qc;
4775
4776 /* no command while frozen */
4777 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4778 return NULL;
4779
4780 /* SATA uses the block tag directly; libsas needs its own tag management */
4781 if (ap->scsi_host) {
4782 qc = __ata_qc_from_tag(ap, blktag);
4783 qc->tag = blktag;
4784 return qc;
4785 }
4786
4787 return sas_ata_qc_new(ap);
4788 }
4789
4790 /**
4791 * ata_qc_new_init - Request an available ATA command, and initialize it
4792 * @dev: Device from which we request an available command structure
4793 *
4794 * LOCKING:
4795 * None.
4796 */
4797
4798 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int blktag)
4799 {
4800 struct ata_port *ap = dev->link->ap;
4801 struct ata_queued_cmd *qc;
4802
4803 qc = ata_qc_new(ap, blktag);
4804 if (qc) {
4805 qc->scsicmd = NULL;
4806 qc->ap = ap;
4807 qc->dev = dev;
4808
4809 ata_qc_reinit(qc);
4810 }
4811
4812 return qc;
4813 }
4814
4815 /**
4816 * ata_qc_free - free unused ata_queued_cmd
4817 * @qc: Command to complete
4818 *
4819 * Designed to free unused ata_queued_cmd object
4820 * in case something prevents using it.
4821 *
4822 * LOCKING:
4823 * spin_lock_irqsave(host lock)
4824 */
4825 static void sas_ata_qc_free(unsigned int tag, struct ata_port *ap)
4826 {
4827 if (!ap->scsi_host)
4828 clear_bit(tag, &ap->sas_tag_allocated);
4829 }
4830
4831 void ata_qc_free(struct ata_queued_cmd *qc)
4832 {
4833 struct ata_port *ap;
4834 unsigned int tag;
4835
4836 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4837 ap = qc->ap;
4838
4839 qc->flags = 0;
4840 tag = qc->tag;
4841 if (likely(ata_tag_valid(tag))) {
4842 qc->tag = ATA_TAG_POISON;
4843 sas_ata_qc_free(tag, ap);
4844 }
4845 }
4846
4847 void __ata_qc_complete(struct ata_queued_cmd *qc)
4848 {
4849 struct ata_port *ap;
4850 struct ata_link *link;
4851
4852 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4853 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4854 ap = qc->ap;
4855 link = qc->dev->link;
4856
4857 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4858 ata_sg_clean(qc);
4859
4860 /* command should be marked inactive atomically with qc completion */
4861 if (qc->tf.protocol == ATA_PROT_NCQ) {
4862 link->sactive &= ~(1 << qc->tag);
4863 if (!link->sactive)
4864 ap->nr_active_links--;
4865 } else {
4866 link->active_tag = ATA_TAG_POISON;
4867 ap->nr_active_links--;
4868 }
4869
4870 /* clear exclusive status */
4871 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4872 ap->excl_link == link))
4873 ap->excl_link = NULL;
4874
4875 /* atapi: mark qc as inactive to prevent the interrupt handler
4876 * from completing the command twice later, before the error handler
4877 * is called. (when rc != 0 and atapi request sense is needed)
4878 */
4879 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4880 ap->qc_active &= ~(1 << qc->tag);
4881
4882 /* call completion callback */
4883 qc->complete_fn(qc);
4884 }
4885
4886 static void fill_result_tf(struct ata_queued_cmd *qc)
4887 {
4888 struct ata_port *ap = qc->ap;
4889
4890 qc->result_tf.flags = qc->tf.flags;
4891 ap->ops->qc_fill_rtf(qc);
4892 }
4893
4894 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4895 {
4896 struct ata_device *dev = qc->dev;
4897
4898 if (ata_is_nodata(qc->tf.protocol))
4899 return;
4900
4901 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4902 return;
4903
4904 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4905 }
4906
4907 /**
4908 * ata_qc_complete - Complete an active ATA command
4909 * @qc: Command to complete
4910 *
4911 * Indicate to the mid and upper layers that an ATA command has
4912 * completed, with either an ok or not-ok status.
4913 *
4914 * Refrain from calling this function multiple times when
4915 * successfully completing multiple NCQ commands.
4916 * ata_qc_complete_multiple() should be used instead, which will
4917 * properly update IRQ expect state.
4918 *
4919 * LOCKING:
4920 * spin_lock_irqsave(host lock)
4921 */
4922 void ata_qc_complete(struct ata_queued_cmd *qc)
4923 {
4924 struct ata_port *ap = qc->ap;
4925
4926 /* XXX: New EH and old EH use different mechanisms to
4927 * synchronize EH with regular execution path.
4928 *
4929 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4930 * Normal execution path is responsible for not accessing a
4931 * failed qc. libata core enforces the rule by returning NULL
4932 * from ata_qc_from_tag() for failed qcs.
4933 *
4934 * Old EH depends on ata_qc_complete() nullifying completion
4935 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4936 * not synchronize with interrupt handler. Only PIO task is
4937 * taken care of.
4938 */
4939 if (ap->ops->error_handler) {
4940 struct ata_device *dev = qc->dev;
4941 struct ata_eh_info *ehi = &dev->link->eh_info;
4942
4943 if (unlikely(qc->err_mask))
4944 qc->flags |= ATA_QCFLAG_FAILED;
4945
4946 /*
4947 * Finish internal commands without any further processing
4948 * and always with the result TF filled.
4949 */
4950 if (unlikely(ata_tag_internal(qc->tag))) {
4951 fill_result_tf(qc);
4952 __ata_qc_complete(qc);
4953 return;
4954 }
4955
4956 /*
4957 * Non-internal qc has failed. Fill the result TF and
4958 * summon EH.
4959 */
4960 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4961 fill_result_tf(qc);
4962 ata_qc_schedule_eh(qc);
4963 return;
4964 }
4965
4966 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4967
4968 /* read result TF if requested */
4969 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4970 fill_result_tf(qc);
4971
4972 /* Some commands need post-processing after successful
4973 * completion.
4974 */
4975 switch (qc->tf.command) {
4976 case ATA_CMD_SET_FEATURES:
4977 if (qc->tf.feature != SETFEATURES_WC_ON &&
4978 qc->tf.feature != SETFEATURES_WC_OFF)
4979 break;
4980 /* fall through */
4981 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4982 case ATA_CMD_SET_MULTI: /* multi_count changed */
4983 /* revalidate device */
4984 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4985 ata_port_schedule_eh(ap);
4986 break;
4987
4988 case ATA_CMD_SLEEP:
4989 dev->flags |= ATA_DFLAG_SLEEPING;
4990 break;
4991 }
4992
4993 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4994 ata_verify_xfer(qc);
4995
4996 __ata_qc_complete(qc);
4997 } else {
4998 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4999 return;
5000
5001 /* read result TF if failed or requested */
5002 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5003 fill_result_tf(qc);
5004
5005 __ata_qc_complete(qc);
5006 }
5007 }
5008
5009 /**
5010 * ata_qc_complete_multiple - Complete multiple qcs successfully
5011 * @ap: port in question
5012 * @qc_active: new qc_active mask
5013 *
5014 * Complete in-flight commands. This function is meant to be
5015 * called from low-level driver's interrupt routine to complete
5016 * requests normally. ap->qc_active and @qc_active are compared
5017 * and commands are completed accordingly.
5018 *
5019 * Always use this function when completing multiple NCQ commands
5020 * from IRQ handlers instead of calling ata_qc_complete()
5021 * multiple times to keep IRQ expect status properly in sync.
5022 *
5023 * LOCKING:
5024 * spin_lock_irqsave(host lock)
5025 *
5026 * RETURNS:
5027 * Number of completed commands on success, -errno otherwise.
5028 */
5029 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5030 {
5031 int nr_done = 0;
5032 u32 done_mask;
5033
5034 done_mask = ap->qc_active ^ qc_active;
5035
5036 if (unlikely(done_mask & qc_active)) {
5037 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
5038 ap->qc_active, qc_active);
5039 return -EINVAL;
5040 }
5041
5042 while (done_mask) {
5043 struct ata_queued_cmd *qc;
5044 unsigned int tag = __ffs(done_mask);
5045
5046 qc = ata_qc_from_tag(ap, tag);
5047 if (qc) {
5048 ata_qc_complete(qc);
5049 nr_done++;
5050 }
5051 done_mask &= ~(1 << tag);
5052 }
5053
5054 return nr_done;
5055 }
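/*
 * A minimal sketch of the intended use from an LLD interrupt routine,
 * assuming a hypothetical my_read_active_tags() helper that returns the
 * controller's current mask of still-active tags (e.g. SActive for NCQ):
 *
 *	u32 qc_active = my_read_active_tags(ap);
 *	int nr_done = ata_qc_complete_multiple(ap, qc_active);
 *
 *	if (nr_done < 0)
 *		ata_port_freeze(ap);	// illegal transition, punt to EH
 */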
5056
5057 /**
5058 * ata_qc_issue - issue taskfile to device
5059 * @qc: command to issue to device
5060 *
5061 * Prepare an ATA command for submission to the device.
5062 * This includes mapping the data into a DMA-able
5063 * area, filling in the S/G table, and finally
5064 * writing the taskfile to hardware, starting the command.
5065 *
5066 * LOCKING:
5067 * spin_lock_irqsave(host lock)
5068 */
5069 void ata_qc_issue(struct ata_queued_cmd *qc)
5070 {
5071 struct ata_port *ap = qc->ap;
5072 struct ata_link *link = qc->dev->link;
5073 u8 prot = qc->tf.protocol;
5074
5075 /* Make sure only one non-NCQ command is outstanding. The
5076 * check is skipped for old EH because it reuses active qc to
5077 * request ATAPI sense.
5078 */
5079 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5080
5081 if (ata_is_ncq(prot)) {
5082 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5083
5084 if (!link->sactive)
5085 ap->nr_active_links++;
5086 link->sactive |= 1 << qc->tag;
5087 } else {
5088 WARN_ON_ONCE(link->sactive);
5089
5090 ap->nr_active_links++;
5091 link->active_tag = qc->tag;
5092 }
5093
5094 qc->flags |= ATA_QCFLAG_ACTIVE;
5095 ap->qc_active |= 1 << qc->tag;
5096
5097 /*
5098 * We guarantee to LLDs that they will have at least one
5099 * non-zero sg if the command is a data command.
5100 */
5101 if (WARN_ON_ONCE(ata_is_data(prot) &&
5102 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5103 goto sys_err;
5104
5105 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5106 (ap->flags & ATA_FLAG_PIO_DMA)))
5107 if (ata_sg_setup(qc))
5108 goto sys_err;
5109
5110 /* if device is sleeping, schedule reset and abort the link */
5111 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5112 link->eh_info.action |= ATA_EH_RESET;
5113 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5114 ata_link_abort(link);
5115 return;
5116 }
5117
5118 ap->ops->qc_prep(qc);
5119
5120 qc->err_mask |= ap->ops->qc_issue(qc);
5121 if (unlikely(qc->err_mask))
5122 goto err;
5123 return;
5124
5125 sys_err:
5126 qc->err_mask |= AC_ERR_SYSTEM;
5127 err:
5128 ata_qc_complete(qc);
5129 }
5130
5131 /**
5132 * sata_scr_valid - test whether SCRs are accessible
5133 * @link: ATA link to test SCR accessibility for
5134 *
5135 * Test whether SCRs are accessible for @link.
5136 *
5137 * LOCKING:
5138 * None.
5139 *
5140 * RETURNS:
5141 * 1 if SCRs are accessible, 0 otherwise.
5142 */
5143 int sata_scr_valid(struct ata_link *link)
5144 {
5145 struct ata_port *ap = link->ap;
5146
5147 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5148 }
5149
5150 /**
5151 * sata_scr_read - read SCR register of the specified port
5152 * @link: ATA link to read SCR for
5153 * @reg: SCR to read
5154 * @val: Place to store read value
5155 *
5156 * Read SCR register @reg of @link into *@val. This function is
5157 * guaranteed to succeed if @link is ap->link, the cable type of
5158 * the port is SATA and the port implements ->scr_read.
5159 *
5160 * LOCKING:
5161 * None if @link is ap->link. Kernel thread context otherwise.
5162 *
5163 * RETURNS:
5164 * 0 on success, negative errno on failure.
5165 */
5166 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5167 {
5168 if (ata_is_host_link(link)) {
5169 if (sata_scr_valid(link))
5170 return link->ap->ops->scr_read(link, reg, val);
5171 return -EOPNOTSUPP;
5172 }
5173
5174 return sata_pmp_scr_read(link, reg, val);
5175 }
5176
5177 /**
5178 * sata_scr_write - write SCR register of the specified port
5179 * @link: ATA link to write SCR for
5180 * @reg: SCR to write
5181 * @val: value to write
5182 *
5183 * Write @val to SCR register @reg of @link. This function is
5184 * guaranteed to succeed if @link is ap->link, the cable type of
5185 * the port is SATA and the port implements ->scr_write.
5186 *
5187 * LOCKING:
5188 * None if @link is ap->link. Kernel thread context otherwise.
5189 *
5190 * RETURNS:
5191 * 0 on success, negative errno on failure.
5192 */
5193 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5194 {
5195 if (ata_is_host_link(link)) {
5196 if (sata_scr_valid(link))
5197 return link->ap->ops->scr_write(link, reg, val);
5198 return -EOPNOTSUPP;
5199 }
5200
5201 return sata_pmp_scr_write(link, reg, val);
5202 }
5203
5204 /**
5205 * sata_scr_write_flush - write SCR register of the specified port and flush
5206 * @link: ATA link to write SCR for
5207 * @reg: SCR to write
5208 * @val: value to write
5209 *
5210 * This function is identical to sata_scr_write() except that this
5211 * function performs flush after writing to the register.
5212 *
5213 * LOCKING:
5214 * None if @link is ap->link. Kernel thread context otherwise.
5215 *
5216 * RETURNS:
5217 * 0 on success, negative errno on failure.
5218 */
5219 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5220 {
5221 if (ata_is_host_link(link)) {
5222 int rc;
5223
5224 if (sata_scr_valid(link)) {
5225 rc = link->ap->ops->scr_write(link, reg, val);
5226 if (rc == 0)
5227 rc = link->ap->ops->scr_read(link, reg, &val);
5228 return rc;
5229 }
5230 return -EOPNOTSUPP;
5231 }
5232
5233 return sata_pmp_scr_write(link, reg, val);
5234 }
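/*
 * Typical SCR usage, as a brief sketch: read SStatus and decode the
 * negotiated link speed from bits 7:4, exactly as sata_print_link_status()
 * does elsewhere in this file:
 *
 *	u32 sstatus;
 *	u8 spd;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
 *		spd = (sstatus >> 4) & 0xf;	// 1 = 1.5 Gbps, 2 = 3.0 Gbps
 */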
5235
5236 /**
5237 * ata_phys_link_online - test whether the given link is online
5238 * @link: ATA link to test
5239 *
5240 * Test whether @link is online. Note that this function returns
5241 * 0 if online status of @link cannot be obtained, so
5242 * ata_link_online(link) != !ata_link_offline(link).
5243 *
5244 * LOCKING:
5245 * None.
5246 *
5247 * RETURNS:
5248 * True if the port online status is available and online.
5249 */
5250 bool ata_phys_link_online(struct ata_link *link)
5251 {
5252 u32 sstatus;
5253
5254 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5255 ata_sstatus_online(sstatus))
5256 return true;
5257 return false;
5258 }
5259
5260 /**
5261 * ata_phys_link_offline - test whether the given link is offline
5262 * @link: ATA link to test
5263 *
5264 * Test whether @link is offline. Note that this function
5265 * returns 0 if offline status of @link cannot be obtained, so
5266 * ata_link_online(link) != !ata_link_offline(link).
5267 *
5268 * LOCKING:
5269 * None.
5270 *
5271 * RETURNS:
5272 * True if the port offline status is available and offline.
5273 */
5274 bool ata_phys_link_offline(struct ata_link *link)
5275 {
5276 u32 sstatus;
5277
5278 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5279 !ata_sstatus_online(sstatus))
5280 return true;
5281 return false;
5282 }
5283
5284 /**
5285 * ata_link_online - test whether the given link is online
5286 * @link: ATA link to test
5287 *
5288 * Test whether @link is online. This is identical to
5289 * ata_phys_link_online() when there's no slave link. When
5290 * there's a slave link, this function should only be called on
5291 * the master link and will return true if any of M/S links is
5292 * online.
5293 *
5294 * LOCKING:
5295 * None.
5296 *
5297 * RETURNS:
5298 * True if the port online status is available and online.
5299 */
5300 bool ata_link_online(struct ata_link *link)
5301 {
5302 struct ata_link *slave = link->ap->slave_link;
5303
5304 WARN_ON(link == slave); /* shouldn't be called on slave link */
5305
5306 return ata_phys_link_online(link) ||
5307 (slave && ata_phys_link_online(slave));
5308 }
5309
5310 /**
5311 * ata_link_offline - test whether the given link is offline
5312 * @link: ATA link to test
5313 *
5314 * Test whether @link is offline. This is identical to
5315 * ata_phys_link_offline() when there's no slave link. When
5316 * there's a slave link, this function should only be called on
5317 * the master link and will return true if both M/S links are
5318 * offline.
5319 *
5320 * LOCKING:
5321 * None.
5322 *
5323 * RETURNS:
5324 * True if the port offline status is available and offline.
5325 */
5326 bool ata_link_offline(struct ata_link *link)
5327 {
5328 struct ata_link *slave = link->ap->slave_link;
5329
5330 WARN_ON(link == slave); /* shouldn't be called on slave link */
5331
5332 return ata_phys_link_offline(link) &&
5333 (!slave || ata_phys_link_offline(slave));
5334 }
5335
5336 #ifdef CONFIG_PM
5337 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5338 unsigned int action, unsigned int ehi_flags,
5339 bool async)
5340 {
5341 struct ata_link *link;
5342 unsigned long flags;
5343
5344 /* Previous resume operation might still be in
5345 * progress. Wait for PM_PENDING to clear.
5346 */
5347 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5348 ata_port_wait_eh(ap);
5349 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5350 }
5351
5352 /* request PM ops to EH */
5353 spin_lock_irqsave(ap->lock, flags);
5354
5355 ap->pm_mesg = mesg;
5356 ap->pflags |= ATA_PFLAG_PM_PENDING;
5357 ata_for_each_link(link, ap, HOST_FIRST) {
5358 link->eh_info.action |= action;
5359 link->eh_info.flags |= ehi_flags;
5360 }
5361
5362 ata_port_schedule_eh(ap);
5363
5364 spin_unlock_irqrestore(ap->lock, flags);
5365
5366 if (!async) {
5367 ata_port_wait_eh(ap);
5368 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5369 }
5370 }
5371
5372 /*
5373 * On some hardware, the device fails to respond after being spun down for suspend. As
5374 * the device won't be used before being resumed, we don't need to touch the
5375 * device. Ask EH to skip the usual stuff and proceed directly to suspend.
5376 *
5377 * http://thread.gmane.org/gmane.linux.ide/46764
5378 */
5379 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5380 | ATA_EHI_NO_AUTOPSY
5381 | ATA_EHI_NO_RECOVERY;
5382
5383 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5384 {
5385 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5386 }
5387
5388 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5389 {
5390 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5391 }
5392
5393 static int ata_port_pm_suspend(struct device *dev)
5394 {
5395 struct ata_port *ap = to_ata_port(dev);
5396
5397 if (pm_runtime_suspended(dev))
5398 return 0;
5399
5400 ata_port_suspend(ap, PMSG_SUSPEND);
5401 return 0;
5402 }
5403
5404 static int ata_port_pm_freeze(struct device *dev)
5405 {
5406 struct ata_port *ap = to_ata_port(dev);
5407
5408 if (pm_runtime_suspended(dev))
5409 return 0;
5410
5411 ata_port_suspend(ap, PMSG_FREEZE);
5412 return 0;
5413 }
5414
5415 static int ata_port_pm_poweroff(struct device *dev)
5416 {
5417 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5418 return 0;
5419 }
5420
5421 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5422 | ATA_EHI_QUIET;
5423
5424 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5425 {
5426 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5427 }
5428
5429 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5430 {
5431 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5432 }
5433
5434 static int ata_port_pm_resume(struct device *dev)
5435 {
5436 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5437 pm_runtime_disable(dev);
5438 pm_runtime_set_active(dev);
5439 pm_runtime_enable(dev);
5440 return 0;
5441 }
5442
5443 /*
5444 * For ODDs, the upper layer will poll for media change every few seconds,
5445 * which will make it enter and leave the suspend state every few seconds. As
5446 * each suspend causes a hard/soft reset, the gain from runtime suspend
5447 * is very small and the ODD may malfunction after being reset constantly.
5448 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5449 * ODD is attached to the port.
5450 */
5451 static int ata_port_runtime_idle(struct device *dev)
5452 {
5453 struct ata_port *ap = to_ata_port(dev);
5454 struct ata_link *link;
5455 struct ata_device *adev;
5456
5457 ata_for_each_link(link, ap, HOST_FIRST) {
5458 ata_for_each_dev(adev, link, ENABLED)
5459 if (adev->class == ATA_DEV_ATAPI &&
5460 !zpodd_dev_enabled(adev))
5461 return -EBUSY;
5462 }
5463
5464 return 0;
5465 }
5466
5467 static int ata_port_runtime_suspend(struct device *dev)
5468 {
5469 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5470 return 0;
5471 }
5472
5473 static int ata_port_runtime_resume(struct device *dev)
5474 {
5475 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5476 return 0;
5477 }
5478
5479 static const struct dev_pm_ops ata_port_pm_ops = {
5480 .suspend = ata_port_pm_suspend,
5481 .resume = ata_port_pm_resume,
5482 .freeze = ata_port_pm_freeze,
5483 .thaw = ata_port_pm_resume,
5484 .poweroff = ata_port_pm_poweroff,
5485 .restore = ata_port_pm_resume,
5486
5487 .runtime_suspend = ata_port_runtime_suspend,
5488 .runtime_resume = ata_port_runtime_resume,
5489 .runtime_idle = ata_port_runtime_idle,
5490 };
5491
5492 /* sas ports don't participate in pm runtime management of ata_ports,
5493 * and need to resume ata devices at the domain level, not the per-port
5494 * level. sas suspend/resume is async to allow parallel port recovery
5495 * since sas has multiple ata_port instances per Scsi_Host.
5496 */
5497 void ata_sas_port_suspend(struct ata_port *ap)
5498 {
5499 ata_port_suspend_async(ap, PMSG_SUSPEND);
5500 }
5501 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5502
5503 void ata_sas_port_resume(struct ata_port *ap)
5504 {
5505 ata_port_resume_async(ap, PMSG_RESUME);
5506 }
5507 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5508
5509 /**
5510 * ata_host_suspend - suspend host
5511 * @host: host to suspend
5512 * @mesg: PM message
5513 *
5514 * Suspend @host. Actual operation is performed by port suspend.
5515 */
5516 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5517 {
5518 host->dev->power.power_state = mesg;
5519 return 0;
5520 }
5521
5522 /**
5523 * ata_host_resume - resume host
5524 * @host: host to resume
5525 *
5526 * Resume @host. Actual operation is performed by port resume.
5527 */
5528 void ata_host_resume(struct ata_host *host)
5529 {
5530 host->dev->power.power_state = PMSG_ON;
5531 }
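/*
 * A hedged sketch of how a PCI LLD pairs these helpers with its bus-level
 * suspend ("my_pci_device_suspend" is a hypothetical name;
 * ata_pci_device_do_suspend() is provided by libata when CONFIG_PCI is set):
 *
 *	static int my_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		struct ata_host *host = pci_get_drvdata(pdev);
 *		int rc;
 *
 *		rc = ata_host_suspend(host, mesg);
 *		if (rc)
 *			return rc;
 *
 *		ata_pci_device_do_suspend(pdev, mesg);
 *		return 0;
 *	}
 */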
5532 #endif
5533
5534 struct device_type ata_port_type = {
5535 .name = "ata_port",
5536 #ifdef CONFIG_PM
5537 .pm = &ata_port_pm_ops,
5538 #endif
5539 };
5540
5541 /**
5542 * ata_dev_init - Initialize an ata_device structure
5543 * @dev: Device structure to initialize
5544 *
5545 * Initialize @dev in preparation for probing.
5546 *
5547 * LOCKING:
5548 * Inherited from caller.
5549 */
5550 void ata_dev_init(struct ata_device *dev)
5551 {
5552 struct ata_link *link = ata_dev_phys_link(dev);
5553 struct ata_port *ap = link->ap;
5554 unsigned long flags;
5555
5556 /* SATA spd limit is bound to the attached device, reset together */
5557 link->sata_spd_limit = link->hw_sata_spd_limit;
5558 link->sata_spd = 0;
5559
5560 /* High bits of dev->flags are used to record warm plug
5561 * requests which occur asynchronously. Synchronize using
5562 * host lock.
5563 */
5564 spin_lock_irqsave(ap->lock, flags);
5565 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5566 dev->horkage = 0;
5567 spin_unlock_irqrestore(ap->lock, flags);
5568
5569 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5570 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5571 dev->pio_mask = UINT_MAX;
5572 dev->mwdma_mask = UINT_MAX;
5573 dev->udma_mask = UINT_MAX;
5574 }
5575
5576 /**
5577 * ata_link_init - Initialize an ata_link structure
5578 * @ap: ATA port link is attached to
5579 * @link: Link structure to initialize
5580 * @pmp: Port multiplier port number
5581 *
5582 * Initialize @link.
5583 *
5584 * LOCKING:
5585 * Kernel thread context (may sleep)
5586 */
5587 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5588 {
5589 int i;
5590
5591 /* clear everything except for devices */
5592 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5593 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5594
5595 link->ap = ap;
5596 link->pmp = pmp;
5597 link->active_tag = ATA_TAG_POISON;
5598 link->hw_sata_spd_limit = UINT_MAX;
5599
5600 /* can't use iterator, ap isn't initialized yet */
5601 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5602 struct ata_device *dev = &link->device[i];
5603
5604 dev->link = link;
5605 dev->devno = dev - link->device;
5606 #ifdef CONFIG_ATA_ACPI
5607 dev->gtf_filter = ata_acpi_gtf_filter;
5608 #endif
5609 ata_dev_init(dev);
5610 }
5611 }
5612
5613 /**
5614 * sata_link_init_spd - Initialize link->sata_spd_limit
5615 * @link: Link to configure sata_spd_limit for
5616 *
5617 * Initialize @link->[hw_]sata_spd_limit to the currently
5618 * configured value.
5619 *
5620 * LOCKING:
5621 * Kernel thread context (may sleep).
5622 *
5623 * RETURNS:
5624 * 0 on success, -errno on failure.
5625 */
5626 int sata_link_init_spd(struct ata_link *link)
5627 {
5628 u8 spd;
5629 int rc;
5630
5631 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5632 if (rc)
5633 return rc;
5634
5635 spd = (link->saved_scontrol >> 4) & 0xf;
5636 if (spd)
5637 link->hw_sata_spd_limit &= (1 << spd) - 1;
5638
5639 ata_force_link_limits(link);
5640
5641 link->sata_spd_limit = link->hw_sata_spd_limit;
5642
5643 return 0;
5644 }
5645
5646 /**
5647 * ata_port_alloc - allocate and initialize basic ATA port resources
5648 * @host: ATA host this allocated port belongs to
5649 *
5650 * Allocate and initialize basic ATA port resources.
5651 *
5652 * RETURNS:
5653 * Allocated ATA port on success, NULL on failure.
5654 *
5655 * LOCKING:
5656 * Inherited from calling layer (may sleep).
5657 */
5658 struct ata_port *ata_port_alloc(struct ata_host *host)
5659 {
5660 struct ata_port *ap;
5661
5662 DPRINTK("ENTER\n");
5663
5664 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5665 if (!ap)
5666 return NULL;
5667
5668 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5669 ap->lock = &host->lock;
5670 ap->print_id = -1;
5671 ap->local_port_no = -1;
5672 ap->host = host;
5673 ap->dev = host->dev;
5674
5675 #if defined(ATA_VERBOSE_DEBUG)
5676 /* turn on all debugging levels */
5677 ap->msg_enable = 0x00FF;
5678 #elif defined(ATA_DEBUG)
5679 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5680 #else
5681 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5682 #endif
5683
5684 mutex_init(&ap->scsi_scan_mutex);
5685 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5686 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5687 INIT_LIST_HEAD(&ap->eh_done_q);
5688 init_waitqueue_head(&ap->eh_wait_q);
5689 init_completion(&ap->park_req_pending);
5690 init_timer_deferrable(&ap->fastdrain_timer);
5691 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5692 ap->fastdrain_timer.data = (unsigned long)ap;
5693
5694 ap->cbl = ATA_CBL_NONE;
5695
5696 ata_link_init(ap, &ap->link, 0);
5697
5698 #ifdef ATA_IRQ_TRAP
5699 ap->stats.unhandled_irq = 1;
5700 ap->stats.idle_irq = 1;
5701 #endif
5702 ata_sff_port_init(ap);
5703
5704 return ap;
5705 }
5706
5707 static void ata_host_release(struct device *gendev, void *res)
5708 {
5709 struct ata_host *host = dev_get_drvdata(gendev);
5710 int i;
5711
5712 for (i = 0; i < host->n_ports; i++) {
5713 struct ata_port *ap = host->ports[i];
5714
5715 if (!ap)
5716 continue;
5717
5718 if (ap->scsi_host)
5719 scsi_host_put(ap->scsi_host);
5720
5721 kfree(ap->pmp_link);
5722 kfree(ap->slave_link);
5723 kfree(ap);
5724 host->ports[i] = NULL;
5725 }
5726
5727 dev_set_drvdata(gendev, NULL);
5728 }
5729
5730 /**
5731 * ata_host_alloc - allocate and init basic ATA host resources
5732 * @dev: generic device this host is associated with
5733 * @max_ports: maximum number of ATA ports associated with this host
5734 *
5735 * Allocate and initialize basic ATA host resources. LLD calls
5736 * this function to allocate a host, initializes it fully and
5737 * attaches it using ata_host_register().
5738 *
5739 * @max_ports ports are allocated and host->n_ports is
5740 * initialized to @max_ports. The caller is allowed to decrease
5741 * host->n_ports before calling ata_host_register(). The unused
5742 * ports will be automatically freed on registration.
5743 *
5744 * RETURNS:
5745 * Allocated ATA host on success, NULL on failure.
5746 *
5747 * LOCKING:
5748 * Inherited from calling layer (may sleep).
5749 */
5750 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5751 {
5752 struct ata_host *host;
5753 size_t sz;
5754 int i;
5755
5756 DPRINTK("ENTER\n");
5757
5758 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5759 return NULL;
5760
5761 /* alloc a container for our list of ATA ports (buses) */
5762 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5764 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5765 if (!host)
5766 goto err_out;
5767
5768 devres_add(dev, host);
5769 dev_set_drvdata(dev, host);
5770
5771 spin_lock_init(&host->lock);
5772 mutex_init(&host->eh_mutex);
5773 host->dev = dev;
5774 host->n_ports = max_ports;
5775
5776 /* allocate ports bound to this host */
5777 for (i = 0; i < max_ports; i++) {
5778 struct ata_port *ap;
5779
5780 ap = ata_port_alloc(host);
5781 if (!ap)
5782 goto err_out;
5783
5784 ap->port_no = i;
5785 host->ports[i] = ap;
5786 }
5787
5788 devres_remove_group(dev, NULL);
5789 return host;
5790
5791 err_out:
5792 devres_release_group(dev, NULL);
5793 return NULL;
5794 }
5795
5796 /**
5797 * ata_host_alloc_pinfo - alloc host and init with port_info array
5798 * @dev: generic device this host is associated with
5799 * @ppi: array of ATA port_info to initialize host with
5800 * @n_ports: number of ATA ports attached to this host
5801 *
5802 * Allocate ATA host and initialize with info from @ppi. If NULL
5803 * terminated, @ppi may contain fewer entries than @n_ports. The
5804 * last entry will be used for the remaining ports.
5805 *
5806 * RETURNS:
5807 * Allocated ATA host on success, NULL on failure.
5808 *
5809 * LOCKING:
5810 * Inherited from calling layer (may sleep).
5811 */
5812 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5813 const struct ata_port_info * const * ppi,
5814 int n_ports)
5815 {
5816 const struct ata_port_info *pi;
5817 struct ata_host *host;
5818 int i, j;
5819
5820 host = ata_host_alloc(dev, n_ports);
5821 if (!host)
5822 return NULL;
5823
5824 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5825 struct ata_port *ap = host->ports[i];
5826
5827 if (ppi[j])
5828 pi = ppi[j++];
5829
5830 ap->pio_mask = pi->pio_mask;
5831 ap->mwdma_mask = pi->mwdma_mask;
5832 ap->udma_mask = pi->udma_mask;
5833 ap->flags |= pi->flags;
5834 ap->link.flags |= pi->link_flags;
5835 ap->ops = pi->port_ops;
5836
5837 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5838 host->ops = pi->port_ops;
5839 }
5840
5841 return host;
5842 }
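
/*
 * Usage sketch, not part of libata: how a low-level driver might call
 * ata_host_alloc_pinfo() from its probe routine and then activate the
 * host.  Everything named foo_* (ops, port info, interrupt handler, IRQ
 * number, scsi_host_template) is a hypothetical placeholder.
 */
static struct ata_port_operations __maybe_unused foo_example_ops = {
	.inherits	= &sata_port_ops,	/* a real driver adds its methods */
};

static int __maybe_unused foo_probe_sketch(struct device *dev, int irq,
					    irq_handler_t foo_interrupt,
					    struct scsi_host_template *foo_sht)
{
	static const struct ata_port_info foo_pi = {
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &foo_example_ops,
	};
	const struct ata_port_info *ppi[] = { &foo_pi, NULL };
	struct ata_host *host;

	/* one port; the NULL-terminated ppi[] supplies its parameters */
	host = ata_host_alloc_pinfo(dev, ppi, 1);
	if (!host)
		return -ENOMEM;

	/* start ports, request the IRQ and register with SCSI in one go */
	return ata_host_activate(host, irq, foo_interrupt, IRQF_SHARED,
				 foo_sht);
}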
5843
5844 /**
5845 * ata_slave_link_init - initialize slave link
5846 * @ap: port to initialize slave link for
5847 *
5848 * Create and initialize slave link for @ap. This enables slave
5849 * link handling on the port.
5850 *
5851 * In libata, a port contains links and a link contains devices.
5852 * There is a single host link but if a PMP is attached to it,
5853 * there can be multiple fan-out links. On SATA, there's usually
5854 * a single device connected to a link but PATA and SATA
5855 * controllers emulating a TF-based interface can have two - master
5856 * and slave.
5857 *
5858 * However, there are a few controllers which don't fit into this
5859 * abstraction too well - SATA controllers which emulate TF
5860 * interface with both master and slave devices but also have
5861 * separate SCR register sets for each device. These controllers
5862 * need separate links for physical link handling
5863 * (e.g. onlineness, link speed) but should be treated like a
5864 * traditional M/S controller for everything else (e.g. command
5865 * issue, softreset).
5866 *
5867 * slave_link is libata's way of handling this class of
5868 * controllers without impacting core layer too much. For
5869 * anything other than physical link handling, the default host
5870 * link is used for both master and slave. For physical link
5871 * handling, separate @ap->slave_link is used. All dirty details
5872 * are implemented inside libata core layer. From LLD's POV, the
5873 * only difference is that prereset, hardreset and postreset are
5874 * called once more for the slave link, so the reset sequence
5875 * looks like the following.
5876 *
5877 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5878 * softreset(M) -> postreset(M) -> postreset(S)
5879 *
5880 * Note that softreset is called only for the master. Softreset
5881 * resets both M/S by definition, so SRST on master should handle
5882 * both (the standard method will work just fine).
5883 *
5884 * LOCKING:
5885 * Should be called before host is registered.
5886 *
5887 * RETURNS:
5888 * 0 on success, -errno on failure.
5889 */
5890 int ata_slave_link_init(struct ata_port *ap)
5891 {
5892 struct ata_link *link;
5893
5894 WARN_ON(ap->slave_link);
5895 WARN_ON(ap->flags & ATA_FLAG_PMP);
5896
5897 link = kzalloc(sizeof(*link), GFP_KERNEL);
5898 if (!link)
5899 return -ENOMEM;
5900
5901 ata_link_init(ap, link, 1);
5902 ap->slave_link = link;
5903 return 0;
5904 }
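
/*
 * Sketch only (foo_setup_slave_links() is hypothetical): a driver whose
 * hardware exposes separate SCRs for master and slave would typically
 * enable slave links on its non-PMP ports before ata_host_register().
 */
static int __maybe_unused foo_setup_slave_links(struct ata_host *host)
{
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		rc = ata_slave_link_init(host->ports[i]);
		if (rc)
			return rc;
	}
	return 0;
}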
5905
5906 static void ata_host_stop(struct device *gendev, void *res)
5907 {
5908 struct ata_host *host = dev_get_drvdata(gendev);
5909 int i;
5910
5911 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5912
5913 for (i = 0; i < host->n_ports; i++) {
5914 struct ata_port *ap = host->ports[i];
5915
5916 if (ap->ops->port_stop)
5917 ap->ops->port_stop(ap);
5918 }
5919
5920 if (host->ops->host_stop)
5921 host->ops->host_stop(host);
5922 }
5923
5924 /**
5925 * ata_finalize_port_ops - finalize ata_port_operations
5926 * @ops: ata_port_operations to finalize
5927 *
5928 * An ata_port_operations can inherit from another ops and that
5929 * ops can again inherit from another. This can go on as many
5930 * times as necessary as long as there is no loop in the
5931 * inheritance chain.
5932 *
5933 * Ops tables are finalized when the host is started. NULL or
5934 * unspecified entries are inherited from the closest ancestor
5935 * which has the method and the entry is populated with it.
5936 * After finalization, the ops table directly points to all the
5937 * methods and ->inherits is no longer necessary and cleared.
5938 *
5939 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5940 *
5941 * LOCKING:
5942 * None.
5943 */
5944 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5945 {
5946 static DEFINE_SPINLOCK(lock);
5947 const struct ata_port_operations *cur;
5948 void **begin = (void **)ops;
5949 void **end = (void **)&ops->inherits;
5950 void **pp;
5951
5952 if (!ops || !ops->inherits)
5953 return;
5954
5955 spin_lock(&lock);
5956
5957 for (cur = ops->inherits; cur; cur = cur->inherits) {
5958 void **inherit = (void **)cur;
5959
5960 for (pp = begin; pp < end; pp++, inherit++)
5961 if (!*pp)
5962 *pp = *inherit;
5963 }
5964
5965 for (pp = begin; pp < end; pp++)
5966 if (IS_ERR(*pp))
5967 *pp = NULL;
5968
5969 ops->inherits = NULL;
5970
5971 spin_unlock(&lock);
5972 }
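
/*
 * Illustration only (foo_inherit_ops is hypothetical, not a libata symbol):
 * an inheriting ops table fills in just the slots it overrides.  Unset
 * slots are copied from the closest ancestor when ata_finalize_port_ops()
 * runs at host start time, and ATA_OP_NULL pins a slot to NULL even if an
 * ancestor provides it.
 */
static struct ata_port_operations __maybe_unused foo_inherit_ops = {
	.inherits	= &sata_port_ops,	/* qc_defer, hardreset, std EH */
	.hardreset	= ATA_OP_NULL,		/* mask the inherited hardreset */
};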
5973
5974 /**
5975 * ata_host_start - start and freeze ports of an ATA host
5976 * @host: ATA host to start ports for
5977 *
5978 * Start and then freeze ports of @host. Started status is
5979 * recorded in host->flags, so this function can be called
5980 * multiple times. Ports are guaranteed to get started only
5981 * once. If host->ops isn't initialized yet, it's set to the
5982 * first non-dummy port ops.
5983 *
5984 * LOCKING:
5985 * Inherited from calling layer (may sleep).
5986 *
5987 * RETURNS:
5988 * 0 if all ports are started successfully, -errno otherwise.
5989 */
5990 int ata_host_start(struct ata_host *host)
5991 {
5992 int have_stop = 0;
5993 void *start_dr = NULL;
5994 int i, rc;
5995
5996 if (host->flags & ATA_HOST_STARTED)
5997 return 0;
5998
5999 ata_finalize_port_ops(host->ops);
6000
6001 for (i = 0; i < host->n_ports; i++) {
6002 struct ata_port *ap = host->ports[i];
6003
6004 ata_finalize_port_ops(ap->ops);
6005
6006 if (!host->ops && !ata_port_is_dummy(ap))
6007 host->ops = ap->ops;
6008
6009 if (ap->ops->port_stop)
6010 have_stop = 1;
6011 }
6012
6013 if (host->ops->host_stop)
6014 have_stop = 1;
6015
6016 if (have_stop) {
6017 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6018 if (!start_dr)
6019 return -ENOMEM;
6020 }
6021
6022 for (i = 0; i < host->n_ports; i++) {
6023 struct ata_port *ap = host->ports[i];
6024
6025 if (ap->ops->port_start) {
6026 rc = ap->ops->port_start(ap);
6027 if (rc) {
6028 if (rc != -ENODEV)
6029 dev_err(host->dev,
6030 "failed to start port %d (errno=%d)\n",
6031 i, rc);
6032 goto err_out;
6033 }
6034 }
6035 ata_eh_freeze_port(ap);
6036 }
6037
6038 if (start_dr)
6039 devres_add(host->dev, start_dr);
6040 host->flags |= ATA_HOST_STARTED;
6041 return 0;
6042
6043 err_out:
6044 while (--i >= 0) {
6045 struct ata_port *ap = host->ports[i];
6046
6047 if (ap->ops->port_stop)
6048 ap->ops->port_stop(ap);
6049 }
6050 devres_free(start_dr);
6051 return rc;
6052 }
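
/*
 * Sketch (hypothetical foo_* names): drivers that cannot use
 * ata_host_activate(), e.g. because they request several IRQs or map
 * resources between the two steps, call ata_host_start() and
 * ata_host_register() separately.
 */
static int __maybe_unused foo_register_sketch(struct ata_host *host,
					       struct scsi_host_template *sht)
{
	int rc;

	rc = ata_host_start(host);	/* ->port_start() for each port, then freeze */
	if (rc)
		return rc;

	/* ... request IRQs, add port descriptions, etc. here ... */

	return ata_host_register(host, sht);
}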
6053
6054 /**
6055 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
6056 * @host: host to initialize
6057 * @dev: device host is attached to
6058 * @ops: port_ops
6059 *
6060 */
6061 void ata_host_init(struct ata_host *host, struct device *dev,
6062 struct ata_port_operations *ops)
6063 {
6064 spin_lock_init(&host->lock);
6065 mutex_init(&host->eh_mutex);
6066 host->n_tags = ATA_MAX_QUEUE - 1;
6067 host->dev = dev;
6068 host->ops = ops;
6069 }
6070
6071 void __ata_port_probe(struct ata_port *ap)
6072 {
6073 struct ata_eh_info *ehi = &ap->link.eh_info;
6074 unsigned long flags;
6075
6076 /* kick EH for boot probing */
6077 spin_lock_irqsave(ap->lock, flags);
6078
6079 ehi->probe_mask |= ATA_ALL_DEVICES;
6080 ehi->action |= ATA_EH_RESET;
6081 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6082
6083 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6084 ap->pflags |= ATA_PFLAG_LOADING;
6085 ata_port_schedule_eh(ap);
6086
6087 spin_unlock_irqrestore(ap->lock, flags);
6088 }
6089
6090 int ata_port_probe(struct ata_port *ap)
6091 {
6092 int rc = 0;
6093
6094 if (ap->ops->error_handler) {
6095 __ata_port_probe(ap);
6096 ata_port_wait_eh(ap);
6097 } else {
6098 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6099 rc = ata_bus_probe(ap);
6100 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6101 }
6102 return rc;
6103 }
6104
6105
6106 static void async_port_probe(void *data, async_cookie_t cookie)
6107 {
6108 struct ata_port *ap = data;
6109
6110 /*
6111 * If we're not allowed to scan this host in parallel,
6112 * we need to wait until all previous scans have completed
6113 * before going further.
6114 * Jeff Garzik says this is only within a controller, so we
6115 * don't need to wait for port 0, only for later ports.
6116 */
6117 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6118 async_synchronize_cookie(cookie);
6119
6120 (void)ata_port_probe(ap);
6121
6122 /* in order to keep device order, we need to synchronize at this point */
6123 async_synchronize_cookie(cookie);
6124
6125 ata_scsi_scan_host(ap, 1);
6126 }
6127
6128 /**
6129 * ata_host_register - register initialized ATA host
6130 * @host: ATA host to register
6131 * @sht: template for SCSI host
6132 *
6133 * Register initialized ATA host. @host is allocated using
6134 * ata_host_alloc() and fully initialized by LLD. This function
6135 * starts ports, registers @host with ATA and SCSI layers and
6136 * probes registered devices.
6137 *
6138 * LOCKING:
6139 * Inherited from calling layer (may sleep).
6140 *
6141 * RETURNS:
6142 * 0 on success, -errno otherwise.
6143 */
6144 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6145 {
6146 int i, rc;
6147
6148 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6149
6150 /* host must have been started */
6151 if (!(host->flags & ATA_HOST_STARTED)) {
6152 dev_err(host->dev, "BUG: trying to register unstarted host\n");
6153 WARN_ON(1);
6154 return -EINVAL;
6155 }
6156
6157 /* Blow away unused ports. This happens when LLD can't
6158 * determine the exact number of ports to allocate at
6159 * allocation time.
6160 */
6161 for (i = host->n_ports; host->ports[i]; i++)
6162 kfree(host->ports[i]);
6163
6164 /* give ports names and add SCSI hosts */
6165 for (i = 0; i < host->n_ports; i++) {
6166 host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6167 host->ports[i]->local_port_no = i + 1;
6168 }
6169
6170 /* Create associated sysfs transport objects */
6171 for (i = 0; i < host->n_ports; i++) {
6172 rc = ata_tport_add(host->dev, host->ports[i]);
6173 if (rc) {
6174 goto err_tadd;
6175 }
6176 }
6177
6178 rc = ata_scsi_add_hosts(host, sht);
6179 if (rc)
6180 goto err_tadd;
6181
6182 /* set cable, sata_spd_limit and report */
6183 for (i = 0; i < host->n_ports; i++) {
6184 struct ata_port *ap = host->ports[i];
6185 unsigned long xfer_mask;
6186
6187 /* set SATA cable type if still unset */
6188 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6189 ap->cbl = ATA_CBL_SATA;
6190
6191 /* init sata_spd_limit to the current value */
6192 sata_link_init_spd(&ap->link);
6193 if (ap->slave_link)
6194 sata_link_init_spd(ap->slave_link);
6195
6196 /* print per-port info to dmesg */
6197 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6198 ap->udma_mask);
6199
6200 if (!ata_port_is_dummy(ap)) {
6201 ata_port_info(ap, "%cATA max %s %s\n",
6202 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6203 ata_mode_string(xfer_mask),
6204 ap->link.eh_info.desc);
6205 ata_ehi_clear_desc(&ap->link.eh_info);
6206 } else
6207 ata_port_info(ap, "DUMMY\n");
6208 }
6209
6210 /* perform each probe asynchronously */
6211 for (i = 0; i < host->n_ports; i++) {
6212 struct ata_port *ap = host->ports[i];
6213 async_schedule(async_port_probe, ap);
6214 }
6215
6216 return 0;
6217
6218 err_tadd:
6219 while (--i >= 0) {
6220 ata_tport_delete(host->ports[i]);
6221 }
6222 return rc;
6223
6224 }
6225
6226 /**
6227 * ata_host_activate - start host, request IRQ and register it
6228 * @host: target ATA host
6229 * @irq: IRQ to request
6230 * @irq_handler: irq_handler used when requesting IRQ
6231 * @irq_flags: irq_flags used when requesting IRQ
6232 * @sht: scsi_host_template to use when registering the host
6233 *
6234 * After allocating an ATA host and initializing it, most libata
6235 * LLDs perform three steps to activate the host - start host,
6236 * request IRQ and register it. This helper takes the necessary
6237 * arguments and performs the three steps in one go.
6238 *
6239 * An invalid IRQ skips the IRQ registration and expects the host to
6240 * have set polling mode on the port. In this case, @irq_handler
6241 * should be NULL.
6242 *
6243 * LOCKING:
6244 * Inherited from calling layer (may sleep).
6245 *
6246 * RETURNS:
6247 * 0 on success, -errno otherwise.
6248 */
6249 int ata_host_activate(struct ata_host *host, int irq,
6250 irq_handler_t irq_handler, unsigned long irq_flags,
6251 struct scsi_host_template *sht)
6252 {
6253 int i, rc;
6254
6255 rc = ata_host_start(host);
6256 if (rc)
6257 return rc;
6258
6259 /* Special case for polling mode */
6260 if (!irq) {
6261 WARN_ON(irq_handler);
6262 return ata_host_register(host, sht);
6263 }
6264
6265 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6266 dev_name(host->dev), host);
6267 if (rc)
6268 return rc;
6269
6270 for (i = 0; i < host->n_ports; i++)
6271 ata_port_desc(host->ports[i], "irq %d", irq);
6272
6273 rc = ata_host_register(host, sht);
6274 /* if failed, just free the IRQ and leave ports alone */
6275 if (rc)
6276 devm_free_irq(host->dev, irq, host);
6277
6278 return rc;
6279 }
6280
6281 /**
6282 * ata_port_detach - Detach ATA port in preparation for device removal
6283 * @ap: ATA port to be detached
6284 *
6285 * Detach all ATA devices and the associated SCSI devices of @ap;
6286 * then, remove the associated SCSI host. @ap is guaranteed to
6287 * be quiescent on return from this function.
6288 *
6289 * LOCKING:
6290 * Kernel thread context (may sleep).
6291 */
6292 static void ata_port_detach(struct ata_port *ap)
6293 {
6294 unsigned long flags;
6295 struct ata_link *link;
6296 struct ata_device *dev;
6297
6298 if (!ap->ops->error_handler)
6299 goto skip_eh;
6300
6301 /* tell EH we're leaving & flush EH */
6302 spin_lock_irqsave(ap->lock, flags);
6303 ap->pflags |= ATA_PFLAG_UNLOADING;
6304 ata_port_schedule_eh(ap);
6305 spin_unlock_irqrestore(ap->lock, flags);
6306
6307 /* wait till EH commits suicide */
6308 ata_port_wait_eh(ap);
6309
6310 /* it better be dead now */
6311 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6312
6313 cancel_delayed_work_sync(&ap->hotplug_task);
6314
6315 skip_eh:
6316 /* clean up zpodd on port removal */
6317 ata_for_each_link(link, ap, HOST_FIRST) {
6318 ata_for_each_dev(dev, link, ALL) {
6319 if (zpodd_dev_enabled(dev))
6320 zpodd_exit(dev);
6321 }
6322 }
6323 if (ap->pmp_link) {
6324 int i;
6325 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6326 ata_tlink_delete(&ap->pmp_link[i]);
6327 }
6328 /* remove the associated SCSI host */
6329 scsi_remove_host(ap->scsi_host);
6330 ata_tport_delete(ap);
6331 }
6332
6333 /**
6334 * ata_host_detach - Detach all ports of an ATA host
6335 * @host: Host to detach
6336 *
6337 * Detach all ports of @host.
6338 *
6339 * LOCKING:
6340 * Kernel thread context (may sleep).
6341 */
6342 void ata_host_detach(struct ata_host *host)
6343 {
6344 int i;
6345
6346 for (i = 0; i < host->n_ports; i++)
6347 ata_port_detach(host->ports[i]);
6348
6349 /* the host is dead now, dissociate ACPI */
6350 ata_acpi_dissociate(host);
6351 }
6352
6353 #ifdef CONFIG_PCI
6354
6355 /**
6356 * ata_pci_remove_one - PCI layer callback for device removal
6357 * @pdev: PCI device that was removed
6358 *
6359 * PCI layer indicates to libata via this hook that hot-unplug or
6360 * module unload event has occurred. Detach all ports. Resource
6361 * release is handled via devres.
6362 *
6363 * LOCKING:
6364 * Inherited from PCI layer (may sleep).
6365 */
6366 void ata_pci_remove_one(struct pci_dev *pdev)
6367 {
6368 struct ata_host *host = pci_get_drvdata(pdev);
6369
6370 ata_host_detach(host);
6371 }
6372
6373 /* move to PCI subsystem */
6374 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6375 {
6376 unsigned long tmp = 0;
6377
6378 switch (bits->width) {
6379 case 1: {
6380 u8 tmp8 = 0;
6381 pci_read_config_byte(pdev, bits->reg, &tmp8);
6382 tmp = tmp8;
6383 break;
6384 }
6385 case 2: {
6386 u16 tmp16 = 0;
6387 pci_read_config_word(pdev, bits->reg, &tmp16);
6388 tmp = tmp16;
6389 break;
6390 }
6391 case 4: {
6392 u32 tmp32 = 0;
6393 pci_read_config_dword(pdev, bits->reg, &tmp32);
6394 tmp = tmp32;
6395 break;
6396 }
6397
6398 default:
6399 return -EINVAL;
6400 }
6401
6402 tmp &= bits->mask;
6403
6404 return (tmp == bits->val) ? 1 : 0;
6405 }
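
/*
 * Usage sketch (register offsets and bits below are made up): a PCI IDE
 * driver typically describes its per-channel "enable" bits with struct
 * pci_bits { reg, width, mask, val } and checks them before touching a
 * channel, e.g. from its prereset method.
 */
static const struct pci_bits __maybe_unused foo_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },	/* primary:   config reg 0x41, bit 7 */
	{ 0x43, 1, 0x80, 0x80 },	/* secondary: config reg 0x43, bit 7 */
};
/* if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no])) -> channel disabled */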
6406
6407 #ifdef CONFIG_PM
6408 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6409 {
6410 pci_save_state(pdev);
6411 pci_disable_device(pdev);
6412
6413 if (mesg.event & PM_EVENT_SLEEP)
6414 pci_set_power_state(pdev, PCI_D3hot);
6415 }
6416
6417 int ata_pci_device_do_resume(struct pci_dev *pdev)
6418 {
6419 int rc;
6420
6421 pci_set_power_state(pdev, PCI_D0);
6422 pci_restore_state(pdev);
6423
6424 rc = pcim_enable_device(pdev);
6425 if (rc) {
6426 dev_err(&pdev->dev,
6427 "failed to enable device after resume (%d)\n", rc);
6428 return rc;
6429 }
6430
6431 pci_set_master(pdev);
6432 return 0;
6433 }
6434
6435 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6436 {
6437 struct ata_host *host = pci_get_drvdata(pdev);
6438 int rc = 0;
6439
6440 rc = ata_host_suspend(host, mesg);
6441 if (rc)
6442 return rc;
6443
6444 ata_pci_device_do_suspend(pdev, mesg);
6445
6446 return 0;
6447 }
6448
6449 int ata_pci_device_resume(struct pci_dev *pdev)
6450 {
6451 struct ata_host *host = pci_get_drvdata(pdev);
6452 int rc;
6453
6454 rc = ata_pci_device_do_resume(pdev);
6455 if (rc == 0)
6456 ata_host_resume(host);
6457 return rc;
6458 }
6459 #endif /* CONFIG_PM */
6460
6461 #endif /* CONFIG_PCI */
6462
6463 /**
6464 * ata_platform_remove_one - Platform layer callback for device removal
6465 * @pdev: Platform device that was removed
6466 *
6467 * Platform layer indicates to libata via this hook that hot-unplug or
6468 * module unload event has occurred. Detach all ports. Resource
6469 * release is handled via devres.
6470 *
6471 * LOCKING:
6472 * Inherited from platform layer (may sleep).
6473 */
6474 int ata_platform_remove_one(struct platform_device *pdev)
6475 {
6476 struct ata_host *host = platform_get_drvdata(pdev);
6477
6478 ata_host_detach(host);
6479
6480 return 0;
6481 }
6482
6483 static int __init ata_parse_force_one(char **cur,
6484 struct ata_force_ent *force_ent,
6485 const char **reason)
6486 {
6487 /* FIXME: Currently, there's no way to tag init const data and
6488 * using __initdata causes build failure on some versions of
6489 * gcc. Once __initdataconst is implemented, add const to the
6490 * following structure.
6491 */
6492 static struct ata_force_param force_tbl[] __initdata = {
6493 { "40c", .cbl = ATA_CBL_PATA40 },
6494 { "80c", .cbl = ATA_CBL_PATA80 },
6495 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6496 { "unk", .cbl = ATA_CBL_PATA_UNK },
6497 { "ign", .cbl = ATA_CBL_PATA_IGN },
6498 { "sata", .cbl = ATA_CBL_SATA },
6499 { "1.5Gbps", .spd_limit = 1 },
6500 { "3.0Gbps", .spd_limit = 2 },
6501 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6502 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6503 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6504 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6505 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6506 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6507 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6508 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6509 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6510 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6511 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6512 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6513 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6514 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6515 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6516 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6517 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6518 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6519 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6520 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6521 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6522 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6523 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6524 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6525 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6526 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6527 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6528 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6529 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6530 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6531 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6532 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6533 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6534 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6535 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6536 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6537 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6538 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6539 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6540 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6541 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6542 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6543 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6544 };
6545 char *start = *cur, *p = *cur;
6546 char *id, *val, *endp;
6547 const struct ata_force_param *match_fp = NULL;
6548 int nr_matches = 0, i;
6549
6550 /* find where this param ends and update *cur */
6551 while (*p != '\0' && *p != ',')
6552 p++;
6553
6554 if (*p == '\0')
6555 *cur = p;
6556 else
6557 *cur = p + 1;
6558
6559 *p = '\0';
6560
6561 /* parse */
6562 p = strchr(start, ':');
6563 if (!p) {
6564 val = strstrip(start);
6565 goto parse_val;
6566 }
6567 *p = '\0';
6568
6569 id = strstrip(start);
6570 val = strstrip(p + 1);
6571
6572 /* parse id */
6573 p = strchr(id, '.');
6574 if (p) {
6575 *p++ = '\0';
6576 force_ent->device = simple_strtoul(p, &endp, 10);
6577 if (p == endp || *endp != '\0') {
6578 *reason = "invalid device";
6579 return -EINVAL;
6580 }
6581 }
6582
6583 force_ent->port = simple_strtoul(id, &endp, 10);
6584 if (id == endp || *endp != '\0') {
6585 *reason = "invalid port/link";
6586 return -EINVAL;
6587 }
6588
6589 parse_val:
6590 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6591 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6592 const struct ata_force_param *fp = &force_tbl[i];
6593
6594 if (strncasecmp(val, fp->name, strlen(val)))
6595 continue;
6596
6597 nr_matches++;
6598 match_fp = fp;
6599
6600 if (strcasecmp(val, fp->name) == 0) {
6601 nr_matches = 1;
6602 break;
6603 }
6604 }
6605
6606 if (!nr_matches) {
6607 *reason = "unknown value";
6608 return -EINVAL;
6609 }
6610 if (nr_matches > 1) {
6611 *reason = "ambigious value";
6612 return -EINVAL;
6613 }
6614
6615 force_ent->param = *match_fp;
6616
6617 return 0;
6618 }
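
/*
 * Examples (illustrative only) of the libata.force= strings parsed above:
 *
 *	libata.force=noncq		apply to all ports and devices
 *	libata.force=1:1.5Gbps		limit port 1 to 1.5Gbps
 *	libata.force=2.00:udma/33	device 0 on port 2: UDMA/33 at most
 *
 * Entries are comma separated; an entry without an ID reuses the port and
 * device of the previous entry (or applies globally if none was given yet).
 */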
6619
6620 static void __init ata_parse_force_param(void)
6621 {
6622 int idx = 0, size = 1;
6623 int last_port = -1, last_device = -1;
6624 char *p, *cur, *next;
6625
6626 /* calculate maximum number of params and allocate force_tbl */
6627 for (p = ata_force_param_buf; *p; p++)
6628 if (*p == ',')
6629 size++;
6630
6631 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6632 if (!ata_force_tbl) {
6633 printk(KERN_WARNING "ata: failed to extend force table, "
6634 "libata.force ignored\n");
6635 return;
6636 }
6637
6638 /* parse and populate the table */
6639 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6640 const char *reason = "";
6641 struct ata_force_ent te = { .port = -1, .device = -1 };
6642
6643 next = cur;
6644 if (ata_parse_force_one(&next, &te, &reason)) {
6645 printk(KERN_WARNING "ata: failed to parse force "
6646 "parameter \"%s\" (%s)\n",
6647 cur, reason);
6648 continue;
6649 }
6650
6651 if (te.port == -1) {
6652 te.port = last_port;
6653 te.device = last_device;
6654 }
6655
6656 ata_force_tbl[idx++] = te;
6657
6658 last_port = te.port;
6659 last_device = te.device;
6660 }
6661
6662 ata_force_tbl_size = idx;
6663 }
6664
6665 static int __init ata_init(void)
6666 {
6667 int rc;
6668
6669 ata_parse_force_param();
6670
6671 rc = ata_sff_init();
6672 if (rc) {
6673 kfree(ata_force_tbl);
6674 return rc;
6675 }
6676
6677 libata_transport_init();
6678 ata_scsi_transport_template = ata_attach_transport();
6679 if (!ata_scsi_transport_template) {
6680 ata_sff_exit();
6681 rc = -ENOMEM;
6682 goto err_out;
6683 }
6684
6685 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6686 return 0;
6687
6688 err_out:
6689 return rc;
6690 }
6691
6692 static void __exit ata_exit(void)
6693 {
6694 ata_release_transport(ata_scsi_transport_template);
6695 libata_transport_exit();
6696 ata_sff_exit();
6697 kfree(ata_force_tbl);
6698 }
6699
6700 subsys_initcall(ata_init);
6701 module_exit(ata_exit);
6702
6703 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6704
6705 int ata_ratelimit(void)
6706 {
6707 return __ratelimit(&ratelimit);
6708 }
6709
6710 /**
6711 * ata_msleep - ATA EH owner aware msleep
6712 * @ap: ATA port to attribute the sleep to
6713 * @msecs: duration to sleep in milliseconds
6714 *
6715 * Sleeps @msecs. If the current task is owner of @ap's EH, the
6716 * ownership is released before going to sleep and reacquired
6717 * after the sleep is complete. IOW, other ports sharing the
6718 * @ap->host will be allowed to own the EH while this task is
6719 * sleeping.
6720 *
6721 * LOCKING:
6722 * Might sleep.
6723 */
6724 void ata_msleep(struct ata_port *ap, unsigned int msecs)
6725 {
6726 bool owns_eh = ap && ap->host->eh_owner == current;
6727
6728 if (owns_eh)
6729 ata_eh_release(ap);
6730
6731 msleep(msecs);
6732
6733 if (owns_eh)
6734 ata_eh_acquire(ap);
6735 }
6736
6737 /**
6738 * ata_wait_register - wait until register value changes
6739 * @ap: ATA port to wait register for, can be NULL
6740 * @reg: IO-mapped register
6741 * @mask: Mask to apply to read register value
6742 * @val: Wait condition
6743 * @interval: polling interval in milliseconds
6744 * @timeout: timeout in milliseconds
6745 *
6746 * Waiting for some bits of register to change is a common
6747 * operation for ATA controllers. This function reads 32bit LE
6748 * IO-mapped register @reg and tests for the following condition.
6749 *
6750 * (*@reg & mask) != val
6751 *
6752 * If the condition is met, it returns; otherwise, the process is
6753 * repeated every @interval milliseconds until @timeout expires.
6754 *
6755 * LOCKING:
6756 * Kernel thread context (may sleep)
6757 *
6758 * RETURNS:
6759 * The final register value.
6760 */
6761 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6762 unsigned long interval, unsigned long timeout)
6763 {
6764 unsigned long deadline;
6765 u32 tmp;
6766
6767 tmp = ioread32(reg);
6768
6769 /* Calculate timeout _after_ the first read to make sure
6770 * preceding writes reach the controller before starting to
6771 * eat away the timeout.
6772 */
6773 deadline = ata_deadline(jiffies, timeout);
6774
6775 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6776 ata_msleep(ap, interval);
6777 tmp = ioread32(reg);
6778 }
6779
6780 return tmp;
6781 }
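
/*
 * Usage sketch (foo_wait_idle() and the bit layout are hypothetical):
 * poll an IO-mapped status register every 10ms for up to 1s, waiting for
 * bit 0 to clear, i.e. until (reg & 0x1) != 0x1.
 */
static int __maybe_unused foo_wait_idle(struct ata_port *ap,
					 void __iomem *status_reg)
{
	u32 tmp;

	tmp = ata_wait_register(ap, status_reg, 0x1, 0x1, 10, 1000);
	return (tmp & 0x1) ? -EBUSY : 0;
}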
6782
6783 /*
6784 * Dummy port_ops
6785 */
6786 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6787 {
6788 return AC_ERR_SYSTEM;
6789 }
6790
6791 static void ata_dummy_error_handler(struct ata_port *ap)
6792 {
6793 /* truly dummy */
6794 }
6795
6796 struct ata_port_operations ata_dummy_port_ops = {
6797 .qc_prep = ata_noop_qc_prep,
6798 .qc_issue = ata_dummy_qc_issue,
6799 .error_handler = ata_dummy_error_handler,
6800 .sched_eh = ata_std_sched_eh,
6801 .end_eh = ata_std_end_eh,
6802 };
6803
6804 const struct ata_port_info ata_dummy_port_info = {
6805 .port_ops = &ata_dummy_port_ops,
6806 };
6807
6808 /*
6809 * Utility print functions
6810 */
6811 void ata_port_printk(const struct ata_port *ap, const char *level,
6812 const char *fmt, ...)
6813 {
6814 struct va_format vaf;
6815 va_list args;
6816
6817 va_start(args, fmt);
6818
6819 vaf.fmt = fmt;
6820 vaf.va = &args;
6821
6822 printk("%sata%u: %pV", level, ap->print_id, &vaf);
6823
6824 va_end(args);
6825 }
6826 EXPORT_SYMBOL(ata_port_printk);
6827
6828 void ata_link_printk(const struct ata_link *link, const char *level,
6829 const char *fmt, ...)
6830 {
6831 struct va_format vaf;
6832 va_list args;
6833
6834 va_start(args, fmt);
6835
6836 vaf.fmt = fmt;
6837 vaf.va = &args;
6838
6839 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
6840 printk("%sata%u.%02u: %pV",
6841 level, link->ap->print_id, link->pmp, &vaf);
6842 else
6843 printk("%sata%u: %pV",
6844 level, link->ap->print_id, &vaf);
6845
6846 va_end(args);
6847 }
6848 EXPORT_SYMBOL(ata_link_printk);
6849
6850 void ata_dev_printk(const struct ata_device *dev, const char *level,
6851 const char *fmt, ...)
6852 {
6853 struct va_format vaf;
6854 va_list args;
6855
6856 va_start(args, fmt);
6857
6858 vaf.fmt = fmt;
6859 vaf.va = &args;
6860
6861 printk("%sata%u.%02u: %pV",
6862 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
6863 &vaf);
6864
6865 va_end(args);
6866 }
6867 EXPORT_SYMBOL(ata_dev_printk);
6868
6869 void ata_print_version(const struct device *dev, const char *version)
6870 {
6871 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6872 }
6873 EXPORT_SYMBOL(ata_print_version);
6874
6875 /*
6876 * libata is essentially a library of internal helper functions for
6877 * low-level ATA host controller drivers. As such, the API/ABI is
6878 * likely to change as new drivers are added and updated.
6879 * Do not depend on ABI/API stability.
6880 */
6881 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6882 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6883 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6884 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6885 EXPORT_SYMBOL_GPL(sata_port_ops);
6886 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6887 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6888 EXPORT_SYMBOL_GPL(ata_link_next);
6889 EXPORT_SYMBOL_GPL(ata_dev_next);
6890 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6891 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6892 EXPORT_SYMBOL_GPL(ata_host_init);
6893 EXPORT_SYMBOL_GPL(ata_host_alloc);
6894 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6895 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6896 EXPORT_SYMBOL_GPL(ata_host_start);
6897 EXPORT_SYMBOL_GPL(ata_host_register);
6898 EXPORT_SYMBOL_GPL(ata_host_activate);
6899 EXPORT_SYMBOL_GPL(ata_host_detach);
6900 EXPORT_SYMBOL_GPL(ata_sg_init);
6901 EXPORT_SYMBOL_GPL(ata_qc_complete);
6902 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6903 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6904 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6905 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6906 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6907 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6908 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6909 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6910 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6911 EXPORT_SYMBOL_GPL(ata_mode_string);
6912 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6913 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6914 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6915 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6916 EXPORT_SYMBOL_GPL(ata_dev_disable);
6917 EXPORT_SYMBOL_GPL(sata_set_spd);
6918 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6919 EXPORT_SYMBOL_GPL(sata_link_debounce);
6920 EXPORT_SYMBOL_GPL(sata_link_resume);
6921 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
6922 EXPORT_SYMBOL_GPL(ata_std_prereset);
6923 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6924 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6925 EXPORT_SYMBOL_GPL(ata_std_postreset);
6926 EXPORT_SYMBOL_GPL(ata_dev_classify);
6927 EXPORT_SYMBOL_GPL(ata_dev_pair);
6928 EXPORT_SYMBOL_GPL(ata_ratelimit);
6929 EXPORT_SYMBOL_GPL(ata_msleep);
6930 EXPORT_SYMBOL_GPL(ata_wait_register);
6931 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6932 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6933 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6934 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6935 EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
6936 EXPORT_SYMBOL_GPL(sata_scr_valid);
6937 EXPORT_SYMBOL_GPL(sata_scr_read);
6938 EXPORT_SYMBOL_GPL(sata_scr_write);
6939 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6940 EXPORT_SYMBOL_GPL(ata_link_online);
6941 EXPORT_SYMBOL_GPL(ata_link_offline);
6942 #ifdef CONFIG_PM
6943 EXPORT_SYMBOL_GPL(ata_host_suspend);
6944 EXPORT_SYMBOL_GPL(ata_host_resume);
6945 #endif /* CONFIG_PM */
6946 EXPORT_SYMBOL_GPL(ata_id_string);
6947 EXPORT_SYMBOL_GPL(ata_id_c_string);
6948 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6949 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6950
6951 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6952 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6953 EXPORT_SYMBOL_GPL(ata_timing_compute);
6954 EXPORT_SYMBOL_GPL(ata_timing_merge);
6955 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6956
6957 #ifdef CONFIG_PCI
6958 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6959 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6960 #ifdef CONFIG_PM
6961 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6962 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6963 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6964 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6965 #endif /* CONFIG_PM */
6966 #endif /* CONFIG_PCI */
6967
6968 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6969
6970 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6971 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6972 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6973 EXPORT_SYMBOL_GPL(ata_port_desc);
6974 #ifdef CONFIG_PCI
6975 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6976 #endif /* CONFIG_PCI */
6977 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6978 EXPORT_SYMBOL_GPL(ata_link_abort);
6979 EXPORT_SYMBOL_GPL(ata_port_abort);
6980 EXPORT_SYMBOL_GPL(ata_port_freeze);
6981 EXPORT_SYMBOL_GPL(sata_async_notification);
6982 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6983 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6984 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6985 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6986 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6987 EXPORT_SYMBOL_GPL(ata_do_eh);
6988 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6989
6990 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6991 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6992 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6993 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6994 EXPORT_SYMBOL_GPL(ata_cable_sata);