1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * libata-core.c - helper library for ATA
4 *
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
7 *
8 * libata documentation is available via 'make {ps|pdf}docs',
9 * as Documentation/driver-api/libata.rst
10 *
11 * Hardware documentation available from http://www.t13.org/ and
12 * http://www.sata-io.org/
13 *
14 * Standards documents from:
15 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
16 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
17 * http://www.sata-io.org (SATA)
18 * http://www.compactflash.org (CF)
19 * http://www.qic.org (QIC157 - Tape and DSC)
20 * http://www.ce-ata.org (CE-ATA: not supported)
21 *
22 * libata is essentially a library of internal helper functions for
23 * low-level ATA host controller drivers. As such, the API/ABI is
24 * likely to change as new drivers are added and updated.
25 * Do not depend on ABI/API stability.
26 */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/init.h>
32 #include <linux/list.h>
33 #include <linux/mm.h>
34 #include <linux/spinlock.h>
35 #include <linux/blkdev.h>
36 #include <linux/delay.h>
37 #include <linux/timer.h>
38 #include <linux/time.h>
39 #include <linux/interrupt.h>
40 #include <linux/completion.h>
41 #include <linux/suspend.h>
42 #include <linux/workqueue.h>
43 #include <linux/scatterlist.h>
44 #include <linux/io.h>
45 #include <linux/async.h>
46 #include <linux/log2.h>
47 #include <linux/slab.h>
48 #include <linux/glob.h>
49 #include <scsi/scsi.h>
50 #include <scsi/scsi_cmnd.h>
51 #include <scsi/scsi_host.h>
52 #include <linux/libata.h>
53 #include <asm/byteorder.h>
54 #include <asm/unaligned.h>
55 #include <linux/cdrom.h>
56 #include <linux/ratelimit.h>
57 #include <linux/leds.h>
58 #include <linux/pm_runtime.h>
59 #include <linux/platform_device.h>
60 #include <asm/setup.h>
61
62 #define CREATE_TRACE_POINTS
63 #include <trace/events/libata.h>
64
65 #include "libata.h"
66 #include "libata-transport.h"
67
68 /* debounce timing parameters in msecs { interval, duration, timeout } */
69 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
70 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
71 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
72 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
73 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
74 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
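
/*
 * Illustrative sketch: a reset method normally picks one of the tables
 * above and hands it to sata_link_resume()/sata_link_debounce(), e.g.:
 *
 *	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *
 *	rc = sata_link_resume(link, timing, deadline);
 *
 * The three values are the SStatus sampling interval, the time the DET
 * field must stay stable, and the overall timeout, all in msecs.
 */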
75
76 const struct ata_port_operations ata_base_port_ops = {
77 .prereset = ata_std_prereset,
78 .postreset = ata_std_postreset,
79 .error_handler = ata_std_error_handler,
80 .sched_eh = ata_std_sched_eh,
81 .end_eh = ata_std_end_eh,
82 };
83
84 const struct ata_port_operations sata_port_ops = {
85 .inherits = &ata_base_port_ops,
86
87 .qc_defer = ata_std_qc_defer,
88 .hardreset = sata_std_hardreset,
89 };
90 EXPORT_SYMBOL_GPL(sata_port_ops);
91
92 static unsigned int ata_dev_init_params(struct ata_device *dev,
93 u16 heads, u16 sectors);
94 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
95 static void ata_dev_xfermask(struct ata_device *dev);
96 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
97
98 atomic_t ata_print_id = ATOMIC_INIT(0);
99
100 struct ata_force_param {
101 const char *name;
102 u8 cbl;
103 u8 spd_limit;
104 unsigned long xfer_mask;
105 unsigned int horkage_on;
106 unsigned int horkage_off;
107 u16 lflags;
108 };
109
110 struct ata_force_ent {
111 int port;
112 int device;
113 struct ata_force_param param;
114 };
115
116 static struct ata_force_ent *ata_force_tbl;
117 static int ata_force_tbl_size;
118
119 static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
120 /* param_buf is thrown away after initialization, disallow read */
121 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
122 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
123
124 static int atapi_enabled = 1;
125 module_param(atapi_enabled, int, 0444);
126 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
127
128 static int atapi_dmadir = 0;
129 module_param(atapi_dmadir, int, 0444);
130 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
131
132 int atapi_passthru16 = 1;
133 module_param(atapi_passthru16, int, 0444);
134 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
135
136 int libata_fua = 0;
137 module_param_named(fua, libata_fua, int, 0444);
138 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
139
140 static int ata_ignore_hpa;
141 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
142 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
143
144 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
145 module_param_named(dma, libata_dma_mask, int, 0444);
146 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
147
148 static int ata_probe_timeout;
149 module_param(ata_probe_timeout, int, 0444);
150 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
151
152 int libata_noacpi = 0;
153 module_param_named(noacpi, libata_noacpi, int, 0444);
154 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
155
156 int libata_allow_tpm = 0;
157 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
158 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
159
160 static int atapi_an;
161 module_param(atapi_an, int, 0444);
162 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
163
164 MODULE_AUTHOR("Jeff Garzik");
165 MODULE_DESCRIPTION("Library module for ATA devices");
166 MODULE_LICENSE("GPL");
167 MODULE_VERSION(DRV_VERSION);
168
169
170 static bool ata_sstatus_online(u32 sstatus)
171 {
172 return (sstatus & 0xf) == 0x3;
173 }
174
175 /**
176 * ata_link_next - link iteration helper
177 * @link: the previous link, NULL to start
178 * @ap: ATA port containing links to iterate
179 * @mode: iteration mode, one of ATA_LITER_*
180 *
181 * LOCKING:
182 * Host lock or EH context.
183 *
184 * RETURNS:
185 * Pointer to the next link.
186 */
187 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
188 enum ata_link_iter_mode mode)
189 {
190 BUG_ON(mode != ATA_LITER_EDGE &&
191 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
192
193 /* NULL link indicates start of iteration */
194 if (!link)
195 switch (mode) {
196 case ATA_LITER_EDGE:
197 case ATA_LITER_PMP_FIRST:
198 if (sata_pmp_attached(ap))
199 return ap->pmp_link;
200 /* fall through */
201 case ATA_LITER_HOST_FIRST:
202 return &ap->link;
203 }
204
205 /* we just iterated over the host link, what's next? */
206 if (link == &ap->link)
207 switch (mode) {
208 case ATA_LITER_HOST_FIRST:
209 if (sata_pmp_attached(ap))
210 return ap->pmp_link;
211 /* fall through */
212 case ATA_LITER_PMP_FIRST:
213 if (unlikely(ap->slave_link))
214 return ap->slave_link;
215 /* fall through */
216 case ATA_LITER_EDGE:
217 return NULL;
218 }
219
220 /* slave_link excludes PMP */
221 if (unlikely(link == ap->slave_link))
222 return NULL;
223
224 /* we were over a PMP link */
225 if (++link < ap->pmp_link + ap->nr_pmp_links)
226 return link;
227
228 if (mode == ATA_LITER_PMP_FIRST)
229 return &ap->link;
230
231 return NULL;
232 }
233 EXPORT_SYMBOL_GPL(ata_link_next);
234
235 /**
236 * ata_dev_next - device iteration helper
237 * @dev: the previous device, NULL to start
238 * @link: ATA link containing devices to iterate
239 * @mode: iteration mode, one of ATA_DITER_*
240 *
241 * LOCKING:
242 * Host lock or EH context.
243 *
244 * RETURNS:
245 * Pointer to the next device.
246 */
247 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
248 enum ata_dev_iter_mode mode)
249 {
250 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
251 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
252
253 /* NULL dev indicates start of iteration */
254 if (!dev)
255 switch (mode) {
256 case ATA_DITER_ENABLED:
257 case ATA_DITER_ALL:
258 dev = link->device;
259 goto check;
260 case ATA_DITER_ENABLED_REVERSE:
261 case ATA_DITER_ALL_REVERSE:
262 dev = link->device + ata_link_max_devices(link) - 1;
263 goto check;
264 }
265
266 next:
267 /* move to the next one */
268 switch (mode) {
269 case ATA_DITER_ENABLED:
270 case ATA_DITER_ALL:
271 if (++dev < link->device + ata_link_max_devices(link))
272 goto check;
273 return NULL;
274 case ATA_DITER_ENABLED_REVERSE:
275 case ATA_DITER_ALL_REVERSE:
276 if (--dev >= link->device)
277 goto check;
278 return NULL;
279 }
280
281 check:
282 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
283 !ata_dev_enabled(dev))
284 goto next;
285 return dev;
286 }
287 EXPORT_SYMBOL_GPL(ata_dev_next);
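
/*
 * Illustrative sketch: the two iterators above are normally consumed
 * through the ata_for_each_link()/ata_for_each_dev() wrapper macros:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_disable(dev);
 */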
288
289 /**
290 * ata_dev_phys_link - find physical link for a device
291 * @dev: ATA device to look up physical link for
292 *
293 * Look up physical link which @dev is attached to. Note that
294 * this is different from @dev->link only when @dev is on slave
295 * link. For all other cases, it's the same as @dev->link.
296 *
297 * LOCKING:
298 * Don't care.
299 *
300 * RETURNS:
301 * Pointer to the found physical link.
302 */
303 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
304 {
305 struct ata_port *ap = dev->link->ap;
306
307 if (!ap->slave_link)
308 return dev->link;
309 if (!dev->devno)
310 return &ap->link;
311 return ap->slave_link;
312 }
313
314 /**
315 * ata_force_cbl - force cable type according to libata.force
316 * @ap: ATA port of interest
317 *
318 * Force cable type according to libata.force and whine about it.
319 * The last entry which has matching port number is used, so it
320 * can be specified as part of device force parameters. For
321 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
322 * same effect.
323 *
324 * LOCKING:
325 * EH context.
326 */
327 void ata_force_cbl(struct ata_port *ap)
328 {
329 int i;
330
331 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
332 const struct ata_force_ent *fe = &ata_force_tbl[i];
333
334 if (fe->port != -1 && fe->port != ap->print_id)
335 continue;
336
337 if (fe->param.cbl == ATA_CBL_NONE)
338 continue;
339
340 ap->cbl = fe->param.cbl;
341 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
342 return;
343 }
344 }
345
346 /**
347 * ata_force_link_limits - force link limits according to libata.force
348 * @link: ATA link of interest
349 *
350 * Force link flags and SATA spd limit according to libata.force
351 * and whine about it. When only the port part is specified
352 * (e.g. 1:), the limit applies to all links connected to both
353 * the host link and all fan-out ports connected via PMP. If the
354 * device part is specified as 0 (e.g. 1.00:), it specifies the
355 * first fan-out link, not the host link. Device number 15 always
356 * points to the host link whether PMP is attached or not. If the
357 * controller has a slave link, device number 16 points to it.
358 *
359 * LOCKING:
360 * EH context.
361 */
362 static void ata_force_link_limits(struct ata_link *link)
363 {
364 bool did_spd = false;
365 int linkno = link->pmp;
366 int i;
367
368 if (ata_is_host_link(link))
369 linkno += 15;
370
371 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
372 const struct ata_force_ent *fe = &ata_force_tbl[i];
373
374 if (fe->port != -1 && fe->port != link->ap->print_id)
375 continue;
376
377 if (fe->device != -1 && fe->device != linkno)
378 continue;
379
380 /* only honor the first spd limit */
381 if (!did_spd && fe->param.spd_limit) {
382 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
383 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
384 fe->param.name);
385 did_spd = true;
386 }
387
388 /* let lflags stack */
389 if (fe->param.lflags) {
390 link->flags |= fe->param.lflags;
391 ata_link_notice(link,
392 "FORCE: link flag 0x%x forced -> 0x%x\n",
393 fe->param.lflags, link->flags);
394 }
395 }
396 }
397
398 /**
399 * ata_force_xfermask - force xfermask according to libata.force
400 * @dev: ATA device of interest
401 *
402 * Force xfer_mask according to libata.force and whine about it.
403 * For consistency with link selection, device number 15 selects
404 * the first device connected to the host link.
405 *
406 * LOCKING:
407 * EH context.
408 */
409 static void ata_force_xfermask(struct ata_device *dev)
410 {
411 int devno = dev->link->pmp + dev->devno;
412 int alt_devno = devno;
413 int i;
414
415 /* allow n.15/16 for devices attached to host port */
416 if (ata_is_host_link(dev->link))
417 alt_devno += 15;
418
419 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
420 const struct ata_force_ent *fe = &ata_force_tbl[i];
421 unsigned long pio_mask, mwdma_mask, udma_mask;
422
423 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
424 continue;
425
426 if (fe->device != -1 && fe->device != devno &&
427 fe->device != alt_devno)
428 continue;
429
430 if (!fe->param.xfer_mask)
431 continue;
432
433 ata_unpack_xfermask(fe->param.xfer_mask,
434 &pio_mask, &mwdma_mask, &udma_mask);
435 if (udma_mask)
436 dev->udma_mask = udma_mask;
437 else if (mwdma_mask) {
438 dev->udma_mask = 0;
439 dev->mwdma_mask = mwdma_mask;
440 } else {
441 dev->udma_mask = 0;
442 dev->mwdma_mask = 0;
443 dev->pio_mask = pio_mask;
444 }
445
446 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
447 fe->param.name);
448 return;
449 }
450 }
451
452 /**
453 * ata_force_horkage - force horkage according to libata.force
454 * @dev: ATA device of interest
455 *
456 * Force horkage according to libata.force and whine about it.
457 * For consistency with link selection, device number 15 selects
458 * the first device connected to the host link.
459 *
460 * LOCKING:
461 * EH context.
462 */
463 static void ata_force_horkage(struct ata_device *dev)
464 {
465 int devno = dev->link->pmp + dev->devno;
466 int alt_devno = devno;
467 int i;
468
469 /* allow n.15/16 for devices attached to host port */
470 if (ata_is_host_link(dev->link))
471 alt_devno += 15;
472
473 for (i = 0; i < ata_force_tbl_size; i++) {
474 const struct ata_force_ent *fe = &ata_force_tbl[i];
475
476 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
477 continue;
478
479 if (fe->device != -1 && fe->device != devno &&
480 fe->device != alt_devno)
481 continue;
482
483 if (!(~dev->horkage & fe->param.horkage_on) &&
484 !(dev->horkage & fe->param.horkage_off))
485 continue;
486
487 dev->horkage |= fe->param.horkage_on;
488 dev->horkage &= ~fe->param.horkage_off;
489
490 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
491 fe->param.name);
492 }
493 }
494
495 /**
496 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
497 * @opcode: SCSI opcode
498 *
499 * Determine ATAPI command type from @opcode.
500 *
501 * LOCKING:
502 * None.
503 *
504 * RETURNS:
505 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
506 */
507 int atapi_cmd_type(u8 opcode)
508 {
509 switch (opcode) {
510 case GPCMD_READ_10:
511 case GPCMD_READ_12:
512 return ATAPI_READ;
513
514 case GPCMD_WRITE_10:
515 case GPCMD_WRITE_12:
516 case GPCMD_WRITE_AND_VERIFY_10:
517 return ATAPI_WRITE;
518
519 case GPCMD_READ_CD:
520 case GPCMD_READ_CD_MSF:
521 return ATAPI_READ_CD;
522
523 case ATA_16:
524 case ATA_12:
525 if (atapi_passthru16)
526 return ATAPI_PASS_THRU;
527 /* fall through */
528 default:
529 return ATAPI_MISC;
530 }
531 }
532 EXPORT_SYMBOL_GPL(atapi_cmd_type);
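
/*
 * Illustrative sketch: a driver's ->check_atapi_dma() hook might use
 * this classification to force PIO for miscellaneous commands:
 *
 *	if (atapi_cmd_type(qc->cdb[0]) == ATAPI_MISC)
 *		return 1;
 */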
533
534 /**
535 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
536 * @tf: Taskfile to convert
537 * @pmp: Port multiplier port
538 * @is_cmd: This FIS is for command
539 * @fis: Buffer into which data will be output
540 *
541 * Converts a standard ATA taskfile to a Serial ATA
542 * FIS structure (Register - Host to Device).
543 *
544 * LOCKING:
545 * Inherited from caller.
546 */
547 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
548 {
549 fis[0] = 0x27; /* Register - Host to Device FIS */
550 fis[1] = pmp & 0xf; /* Port multiplier number */
551 if (is_cmd)
552 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
553
554 fis[2] = tf->command;
555 fis[3] = tf->feature;
556
557 fis[4] = tf->lbal;
558 fis[5] = tf->lbam;
559 fis[6] = tf->lbah;
560 fis[7] = tf->device;
561
562 fis[8] = tf->hob_lbal;
563 fis[9] = tf->hob_lbam;
564 fis[10] = tf->hob_lbah;
565 fis[11] = tf->hob_feature;
566
567 fis[12] = tf->nsect;
568 fis[13] = tf->hob_nsect;
569 fis[14] = 0;
570 fis[15] = tf->ctl;
571
572 fis[16] = tf->auxiliary & 0xff;
573 fis[17] = (tf->auxiliary >> 8) & 0xff;
574 fis[18] = (tf->auxiliary >> 16) & 0xff;
575 fis[19] = (tf->auxiliary >> 24) & 0xff;
576 }
577 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
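
/*
 * Illustrative sketch: an AHCI-style driver fills its command table
 * with a 20-byte H2D FIS built from the queued command's taskfile:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 */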
578
579 /**
580 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
581 * @fis: Buffer from which data will be input
582 * @tf: Taskfile to output
583 *
584 * Converts a serial ATA FIS structure to a standard ATA taskfile.
585 *
586 * LOCKING:
587 * Inherited from caller.
588 */
589
590 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
591 {
592 tf->command = fis[2]; /* status */
593 tf->feature = fis[3]; /* error */
594
595 tf->lbal = fis[4];
596 tf->lbam = fis[5];
597 tf->lbah = fis[6];
598 tf->device = fis[7];
599
600 tf->hob_lbal = fis[8];
601 tf->hob_lbam = fis[9];
602 tf->hob_lbah = fis[10];
603
604 tf->nsect = fis[12];
605 tf->hob_nsect = fis[13];
606 }
607 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
608
609 static const u8 ata_rw_cmds[] = {
610 /* pio multi */
611 ATA_CMD_READ_MULTI,
612 ATA_CMD_WRITE_MULTI,
613 ATA_CMD_READ_MULTI_EXT,
614 ATA_CMD_WRITE_MULTI_EXT,
615 0,
616 0,
617 0,
618 ATA_CMD_WRITE_MULTI_FUA_EXT,
619 /* pio */
620 ATA_CMD_PIO_READ,
621 ATA_CMD_PIO_WRITE,
622 ATA_CMD_PIO_READ_EXT,
623 ATA_CMD_PIO_WRITE_EXT,
624 0,
625 0,
626 0,
627 0,
628 /* dma */
629 ATA_CMD_READ,
630 ATA_CMD_WRITE,
631 ATA_CMD_READ_EXT,
632 ATA_CMD_WRITE_EXT,
633 0,
634 0,
635 0,
636 ATA_CMD_WRITE_FUA_EXT
637 };
638
639 /**
640 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
641 * @tf: command to examine and configure
642 * @dev: device tf belongs to
643 *
644 * Examine the device configuration and tf->flags to calculate
645 * the proper read/write commands and protocol to use.
646 *
647 * LOCKING:
648 * caller.
649 */
650 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
651 {
652 u8 cmd;
653
654 int index, fua, lba48, write;
655
656 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
657 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
658 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
659
660 if (dev->flags & ATA_DFLAG_PIO) {
661 tf->protocol = ATA_PROT_PIO;
662 index = dev->multi_count ? 0 : 8;
663 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
664 /* Unable to use DMA due to host limitation */
665 tf->protocol = ATA_PROT_PIO;
666 index = dev->multi_count ? 0 : 8;
667 } else {
668 tf->protocol = ATA_PROT_DMA;
669 index = 16;
670 }
671
672 cmd = ata_rw_cmds[index + fua + lba48 + write];
673 if (cmd) {
674 tf->command = cmd;
675 return 0;
676 }
677 return -1;
678 }
679
680 /**
681 * ata_tf_read_block - Read block address from ATA taskfile
682 * @tf: ATA taskfile of interest
683 * @dev: ATA device @tf belongs to
684 *
685 * LOCKING:
686 * None.
687 *
688 * Read block address from @tf. This function can handle all
689 * three address formats - LBA, LBA48 and CHS. tf->protocol and
690 * flags select the address format to use.
691 *
692 * RETURNS:
693 * Block address read from @tf.
694 */
695 u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
696 {
697 u64 block = 0;
698
699 if (tf->flags & ATA_TFLAG_LBA) {
700 if (tf->flags & ATA_TFLAG_LBA48) {
701 block |= (u64)tf->hob_lbah << 40;
702 block |= (u64)tf->hob_lbam << 32;
703 block |= (u64)tf->hob_lbal << 24;
704 } else
705 block |= (tf->device & 0xf) << 24;
706
707 block |= tf->lbah << 16;
708 block |= tf->lbam << 8;
709 block |= tf->lbal;
710 } else {
711 u32 cyl, head, sect;
712
713 cyl = tf->lbam | (tf->lbah << 8);
714 head = tf->device & 0xf;
715 sect = tf->lbal;
716
717 if (!sect) {
718 ata_dev_warn(dev,
719 "device reported invalid CHS sector 0\n");
720 return U64_MAX;
721 }
722
723 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
724 }
725
726 return block;
727 }
728
729 /**
730 * ata_build_rw_tf - Build ATA taskfile for given read/write request
731 * @tf: Target ATA taskfile
732 * @dev: ATA device @tf belongs to
733 * @block: Block address
734 * @n_block: Number of blocks
735 * @tf_flags: RW/FUA etc...
736 * @tag: tag
737 * @class: IO priority class
738 *
739 * LOCKING:
740 * None.
741 *
742 * Build ATA taskfile @tf for read/write request described by
743 * @block, @n_block, @tf_flags and @tag on @dev.
744 *
745 * RETURNS:
746 *
747 * 0 on success, -ERANGE if the request is too large for @dev,
748 * -EINVAL if the request is invalid.
749 */
750 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
751 u64 block, u32 n_block, unsigned int tf_flags,
752 unsigned int tag, int class)
753 {
754 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
755 tf->flags |= tf_flags;
756
757 if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
758 /* yay, NCQ */
759 if (!lba_48_ok(block, n_block))
760 return -ERANGE;
761
762 tf->protocol = ATA_PROT_NCQ;
763 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
764
765 if (tf->flags & ATA_TFLAG_WRITE)
766 tf->command = ATA_CMD_FPDMA_WRITE;
767 else
768 tf->command = ATA_CMD_FPDMA_READ;
769
770 tf->nsect = tag << 3;
771 tf->hob_feature = (n_block >> 8) & 0xff;
772 tf->feature = n_block & 0xff;
773
774 tf->hob_lbah = (block >> 40) & 0xff;
775 tf->hob_lbam = (block >> 32) & 0xff;
776 tf->hob_lbal = (block >> 24) & 0xff;
777 tf->lbah = (block >> 16) & 0xff;
778 tf->lbam = (block >> 8) & 0xff;
779 tf->lbal = block & 0xff;
780
781 tf->device = ATA_LBA;
782 if (tf->flags & ATA_TFLAG_FUA)
783 tf->device |= 1 << 7;
784
785 if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
786 if (class == IOPRIO_CLASS_RT)
787 tf->hob_nsect |= ATA_PRIO_HIGH <<
788 ATA_SHIFT_PRIO;
789 }
790 } else if (dev->flags & ATA_DFLAG_LBA) {
791 tf->flags |= ATA_TFLAG_LBA;
792
793 if (lba_28_ok(block, n_block)) {
794 /* use LBA28 */
795 tf->device |= (block >> 24) & 0xf;
796 } else if (lba_48_ok(block, n_block)) {
797 if (!(dev->flags & ATA_DFLAG_LBA48))
798 return -ERANGE;
799
800 /* use LBA48 */
801 tf->flags |= ATA_TFLAG_LBA48;
802
803 tf->hob_nsect = (n_block >> 8) & 0xff;
804
805 tf->hob_lbah = (block >> 40) & 0xff;
806 tf->hob_lbam = (block >> 32) & 0xff;
807 tf->hob_lbal = (block >> 24) & 0xff;
808 } else
809 /* request too large even for LBA48 */
810 return -ERANGE;
811
812 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
813 return -EINVAL;
814
815 tf->nsect = n_block & 0xff;
816
817 tf->lbah = (block >> 16) & 0xff;
818 tf->lbam = (block >> 8) & 0xff;
819 tf->lbal = block & 0xff;
820
821 tf->device |= ATA_LBA;
822 } else {
823 /* CHS */
824 u32 sect, head, cyl, track;
825
826 /* The request -may- be too large for CHS addressing. */
827 if (!lba_28_ok(block, n_block))
828 return -ERANGE;
829
830 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
831 return -EINVAL;
832
833 /* Convert LBA to CHS */
834 track = (u32)block / dev->sectors;
835 cyl = track / dev->heads;
836 head = track % dev->heads;
837 sect = (u32)block % dev->sectors + 1;
838
839 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
840 (u32)block, track, cyl, head, sect);
841
842 /* Check whether the converted CHS can fit.
843 * Cylinder: 0-65535
844 * Head: 0-15
845 * Sector: 1-255 */
846 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
847 return -ERANGE;
848
849 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
850 tf->lbal = sect;
851 tf->lbam = cyl;
852 tf->lbah = cyl >> 8;
853 tf->device |= head;
854 }
855
856 return 0;
857 }
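
/*
 * Illustrative sketch: the SCSI read/write translation path builds its
 * taskfiles through this helper, roughly:
 *
 *	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block,
 *			     tf_flags, qc->hw_tag, class);
 */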
858
859 /**
860 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
861 * @pio_mask: pio_mask
862 * @mwdma_mask: mwdma_mask
863 * @udma_mask: udma_mask
864 *
865 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
866 * unsigned int xfer_mask.
867 *
868 * LOCKING:
869 * None.
870 *
871 * RETURNS:
872 * Packed xfer_mask.
873 */
874 unsigned long ata_pack_xfermask(unsigned long pio_mask,
875 unsigned long mwdma_mask,
876 unsigned long udma_mask)
877 {
878 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
879 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
880 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
881 }
882 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
883
884 /**
885 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
886 * @xfer_mask: xfer_mask to unpack
887 * @pio_mask: resulting pio_mask
888 * @mwdma_mask: resulting mwdma_mask
889 * @udma_mask: resulting udma_mask
890 *
891 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
892 * Any NULL destination masks will be ignored.
893 */
894 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
895 unsigned long *mwdma_mask, unsigned long *udma_mask)
896 {
897 if (pio_mask)
898 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
899 if (mwdma_mask)
900 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
901 if (udma_mask)
902 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
903 }
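
/*
 * Illustrative example: packing PIO4/MWDMA2/UDMA5 capability masks into
 * a single xfer_mask and splitting it apart again:
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA5);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 */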
904
905 static const struct ata_xfer_ent {
906 int shift, bits;
907 u8 base;
908 } ata_xfer_tbl[] = {
909 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
910 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
911 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
912 { -1, },
913 };
914
915 /**
916 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
917 * @xfer_mask: xfer_mask of interest
918 *
919 * Return matching XFER_* value for @xfer_mask. Only the highest
920 * bit of @xfer_mask is considered.
921 *
922 * LOCKING:
923 * None.
924 *
925 * RETURNS:
926 * Matching XFER_* value, 0xff if no match found.
927 */
928 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
929 {
930 int highbit = fls(xfer_mask) - 1;
931 const struct ata_xfer_ent *ent;
932
933 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
934 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
935 return ent->base + highbit - ent->shift;
936 return 0xff;
937 }
938 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
939
940 /**
941 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
942 * @xfer_mode: XFER_* of interest
943 *
944 * Return matching xfer_mask for @xfer_mode.
945 *
946 * LOCKING:
947 * None.
948 *
949 * RETURNS:
950 * Matching xfer_mask, 0 if no match found.
951 */
952 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
953 {
954 const struct ata_xfer_ent *ent;
955
956 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
957 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
958 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
959 & ~((1 << ent->shift) - 1);
960 return 0;
961 }
962 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
963
964 /**
965 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
966 * @xfer_mode: XFER_* of interest
967 *
968 * Return matching xfer_shift for @xfer_mode.
969 *
970 * LOCKING:
971 * None.
972 *
973 * RETURNS:
974 * Matching xfer_shift, -1 if no match found.
975 */
976 int ata_xfer_mode2shift(unsigned long xfer_mode)
977 {
978 const struct ata_xfer_ent *ent;
979
980 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
981 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
982 return ent->shift;
983 return -1;
984 }
985 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
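
/*
 * Illustrative example: the table keeps the three helpers above
 * consistent with one another, e.g.:
 *
 *	ata_xfer_mask2mode(ata_pack_xfermask(0, 0, ATA_UDMA5)) == XFER_UDMA_5
 *	ata_xfer_mode2shift(XFER_UDMA_5) == ATA_SHIFT_UDMA
 */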
986
987 /**
988 * ata_mode_string - convert xfer_mask to string
989 * @xfer_mask: mask of bits supported; only highest bit counts.
990 *
991 * Determine string which represents the highest speed
992 * (highest bit in @xfer_mask).
993 *
994 * LOCKING:
995 * None.
996 *
997 * RETURNS:
998 * Constant C string representing highest speed listed in
999 * @xfer_mask, or the constant C string "<n/a>".
1000 */
1001 const char *ata_mode_string(unsigned long xfer_mask)
1002 {
1003 static const char * const xfer_mode_str[] = {
1004 "PIO0",
1005 "PIO1",
1006 "PIO2",
1007 "PIO3",
1008 "PIO4",
1009 "PIO5",
1010 "PIO6",
1011 "MWDMA0",
1012 "MWDMA1",
1013 "MWDMA2",
1014 "MWDMA3",
1015 "MWDMA4",
1016 "UDMA/16",
1017 "UDMA/25",
1018 "UDMA/33",
1019 "UDMA/44",
1020 "UDMA/66",
1021 "UDMA/100",
1022 "UDMA/133",
1023 "UDMA7",
1024 };
1025 int highbit;
1026
1027 highbit = fls(xfer_mask) - 1;
1028 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1029 return xfer_mode_str[highbit];
1030 return "<n/a>";
1031 }
1032 EXPORT_SYMBOL_GPL(ata_mode_string);
1033
1034 const char *sata_spd_string(unsigned int spd)
1035 {
1036 static const char * const spd_str[] = {
1037 "1.5 Gbps",
1038 "3.0 Gbps",
1039 "6.0 Gbps",
1040 };
1041
1042 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1043 return "<unknown>";
1044 return spd_str[spd - 1];
1045 }
1046
1047 /**
1048 * ata_dev_classify - determine device type based on ATA-spec signature
1049 * @tf: ATA taskfile register set for device to be identified
1050 *
1051 * Determine from taskfile register contents whether a device is
1052 * ATA or ATAPI, as per "Signature and persistence" section
1053 * of ATA/PI spec (volume 1, sect 5.14).
1054 *
1055 * LOCKING:
1056 * None.
1057 *
1058 * RETURNS:
1059 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1060 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1061 */
1062 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1063 {
1064 /* Apple's open source Darwin code hints that some devices only
1065 * put a proper signature into the LBA mid/high registers,
1066 * so we only check those. It's sufficient for uniqueness.
1067 *
1068 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1069 * signatures for ATA and ATAPI devices attached on SerialATA,
1070 * 0x3c/0xc3 and 0x69/0x96 respectively. However, the SerialATA
1071 * spec never mentioned using different signatures for
1072 * ATA/ATAPI devices. Then, the Serial ATA II: Port
1073 * Multiplier specification began to use 0x69/0x96 to identify
1074 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1075 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1076 * 0x69/0x96, describing them instead as reserved for
1077 * SerialATA.
1078 *
1079 * We follow the current spec and consider that 0x69/0x96
1080 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1081 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1082 * SEMB signature. This is worked around in
1083 * ata_dev_read_id().
1084 */
1085 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1086 DPRINTK("found ATA device by sig\n");
1087 return ATA_DEV_ATA;
1088 }
1089
1090 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1091 DPRINTK("found ATAPI device by sig\n");
1092 return ATA_DEV_ATAPI;
1093 }
1094
1095 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1096 DPRINTK("found PMP device by sig\n");
1097 return ATA_DEV_PMP;
1098 }
1099
1100 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1101 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1102 return ATA_DEV_SEMB;
1103 }
1104
1105 if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1106 DPRINTK("found ZAC device by sig\n");
1107 return ATA_DEV_ZAC;
1108 }
1109
1110 DPRINTK("unknown device\n");
1111 return ATA_DEV_UNKNOWN;
1112 }
1113 EXPORT_SYMBOL_GPL(ata_dev_classify);
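
/*
 * Illustrative example: a freshly reset ATAPI device leaves the
 * 0x14/0xeb signature in the LBA mid/high registers, so
 *
 *	struct ata_taskfile tf = { .lbam = 0x14, .lbah = 0xeb };
 *
 * classifies as ATA_DEV_ATAPI.
 */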
1114
1115 /**
1116 * ata_id_string - Convert IDENTIFY DEVICE page into string
1117 * @id: IDENTIFY DEVICE results we will examine
1118 * @s: string into which data is output
1119 * @ofs: offset into identify device page
1120 * @len: length of string to return. Must be an even number.
1121 *
1122 * The strings in the IDENTIFY DEVICE page are broken up into
1123 * 16-bit chunks. Run through the string, and output each
1124 * 8-bit chunk linearly, regardless of platform.
1125 *
1126 * LOCKING:
1127 * caller.
1128 */
1129
1130 void ata_id_string(const u16 *id, unsigned char *s,
1131 unsigned int ofs, unsigned int len)
1132 {
1133 unsigned int c;
1134
1135 BUG_ON(len & 1);
1136
1137 while (len > 0) {
1138 c = id[ofs] >> 8;
1139 *s = c;
1140 s++;
1141
1142 c = id[ofs] & 0xff;
1143 *s = c;
1144 s++;
1145
1146 ofs++;
1147 len -= 2;
1148 }
1149 }
1150 EXPORT_SYMBOL_GPL(ata_id_string);
1151
1152 /**
1153 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1154 * @id: IDENTIFY DEVICE results we will examine
1155 * @s: string into which data is output
1156 * @ofs: offset into identify device page
1157 * @len: length of string to return. Must be an odd number.
1158 *
1159 * This function is identical to ata_id_string except that it
1160 * trims trailing spaces and terminates the resulting string with
1161 * null. @len must be actual maximum length (even number) + 1.
1162 *
1163 * LOCKING:
1164 * caller.
1165 */
1166 void ata_id_c_string(const u16 *id, unsigned char *s,
1167 unsigned int ofs, unsigned int len)
1168 {
1169 unsigned char *p;
1170
1171 ata_id_string(id, s, ofs, len - 1);
1172
1173 p = s + strnlen(s, len - 1);
1174 while (p > s && p[-1] == ' ')
1175 p--;
1176 *p = '\0';
1177 }
1178 EXPORT_SYMBOL_GPL(ata_id_c_string);
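
/*
 * Illustrative example: pulling the model string out of IDENTIFY data,
 * as the probe messages do:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */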
1179
1180 static u64 ata_id_n_sectors(const u16 *id)
1181 {
1182 if (ata_id_has_lba(id)) {
1183 if (ata_id_has_lba48(id))
1184 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1185 else
1186 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1187 } else {
1188 if (ata_id_current_chs_valid(id))
1189 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1190 id[ATA_ID_CUR_SECTORS];
1191 else
1192 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1193 id[ATA_ID_SECTORS];
1194 }
1195 }
1196
1197 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1198 {
1199 u64 sectors = 0;
1200
1201 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1202 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1203 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1204 sectors |= (tf->lbah & 0xff) << 16;
1205 sectors |= (tf->lbam & 0xff) << 8;
1206 sectors |= (tf->lbal & 0xff);
1207
1208 return sectors;
1209 }
1210
1211 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1212 {
1213 u64 sectors = 0;
1214
1215 sectors |= (tf->device & 0x0f) << 24;
1216 sectors |= (tf->lbah & 0xff) << 16;
1217 sectors |= (tf->lbam & 0xff) << 8;
1218 sectors |= (tf->lbal & 0xff);
1219
1220 return sectors;
1221 }
1222
1223 /**
1224 * ata_read_native_max_address - Read native max address
1225 * @dev: target device
1226 * @max_sectors: out parameter for the result native max address
1227 *
1228 * Perform an LBA48 or LBA28 native size query upon the device in
1229 * question.
1230 *
1231 * RETURNS:
1232 * 0 on success, -EACCES if command is aborted by the drive.
1233 * -EIO on other errors.
1234 */
1235 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1236 {
1237 unsigned int err_mask;
1238 struct ata_taskfile tf;
1239 int lba48 = ata_id_has_lba48(dev->id);
1240
1241 ata_tf_init(dev, &tf);
1242
1243 /* always clear all address registers */
1244 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1245
1246 if (lba48) {
1247 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1248 tf.flags |= ATA_TFLAG_LBA48;
1249 } else
1250 tf.command = ATA_CMD_READ_NATIVE_MAX;
1251
1252 tf.protocol = ATA_PROT_NODATA;
1253 tf.device |= ATA_LBA;
1254
1255 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1256 if (err_mask) {
1257 ata_dev_warn(dev,
1258 "failed to read native max address (err_mask=0x%x)\n",
1259 err_mask);
1260 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1261 return -EACCES;
1262 return -EIO;
1263 }
1264
1265 if (lba48)
1266 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1267 else
1268 *max_sectors = ata_tf_to_lba(&tf) + 1;
1269 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1270 (*max_sectors)--;
1271 return 0;
1272 }
1273
1274 /**
1275 * ata_set_max_sectors - Set max sectors
1276 * @dev: target device
1277 * @new_sectors: new max sectors value to set for the device
1278 *
1279 * Set max sectors of @dev to @new_sectors.
1280 *
1281 * RETURNS:
1282 * 0 on success, -EACCES if command is aborted or denied (due to
1283 * previous non-volatile SET_MAX) by the drive. -EIO on other
1284 * errors.
1285 */
1286 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1287 {
1288 unsigned int err_mask;
1289 struct ata_taskfile tf;
1290 int lba48 = ata_id_has_lba48(dev->id);
1291
1292 new_sectors--;
1293
1294 ata_tf_init(dev, &tf);
1295
1296 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1297
1298 if (lba48) {
1299 tf.command = ATA_CMD_SET_MAX_EXT;
1300 tf.flags |= ATA_TFLAG_LBA48;
1301
1302 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1303 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1304 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1305 } else {
1306 tf.command = ATA_CMD_SET_MAX;
1307
1308 tf.device |= (new_sectors >> 24) & 0xf;
1309 }
1310
1311 tf.protocol = ATA_PROT_NODATA;
1312 tf.device |= ATA_LBA;
1313
1314 tf.lbal = (new_sectors >> 0) & 0xff;
1315 tf.lbam = (new_sectors >> 8) & 0xff;
1316 tf.lbah = (new_sectors >> 16) & 0xff;
1317
1318 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1319 if (err_mask) {
1320 ata_dev_warn(dev,
1321 "failed to set max address (err_mask=0x%x)\n",
1322 err_mask);
1323 if (err_mask == AC_ERR_DEV &&
1324 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1325 return -EACCES;
1326 return -EIO;
1327 }
1328
1329 return 0;
1330 }
1331
1332 /**
1333 * ata_hpa_resize - Resize a device with an HPA set
1334 * @dev: Device to resize
1335 *
1336 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1337 * it if required to the full size of the media. The caller must check
1338 * the drive has the HPA feature set enabled.
1339 *
1340 * RETURNS:
1341 * 0 on success, -errno on failure.
1342 */
1343 static int ata_hpa_resize(struct ata_device *dev)
1344 {
1345 struct ata_eh_context *ehc = &dev->link->eh_context;
1346 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1347 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1348 u64 sectors = ata_id_n_sectors(dev->id);
1349 u64 native_sectors;
1350 int rc;
1351
1352 /* do we need to do it? */
1353 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1354 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1355 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1356 return 0;
1357
1358 /* read native max address */
1359 rc = ata_read_native_max_address(dev, &native_sectors);
1360 if (rc) {
1361 /* If device aborted the command or HPA isn't going to
1362 * be unlocked, skip HPA resizing.
1363 */
1364 if (rc == -EACCES || !unlock_hpa) {
1365 ata_dev_warn(dev,
1366 "HPA support seems broken, skipping HPA handling\n");
1367 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1368
1369 /* we can continue if device aborted the command */
1370 if (rc == -EACCES)
1371 rc = 0;
1372 }
1373
1374 return rc;
1375 }
1376 dev->n_native_sectors = native_sectors;
1377
1378 /* nothing to do? */
1379 if (native_sectors <= sectors || !unlock_hpa) {
1380 if (!print_info || native_sectors == sectors)
1381 return 0;
1382
1383 if (native_sectors > sectors)
1384 ata_dev_info(dev,
1385 "HPA detected: current %llu, native %llu\n",
1386 (unsigned long long)sectors,
1387 (unsigned long long)native_sectors);
1388 else if (native_sectors < sectors)
1389 ata_dev_warn(dev,
1390 "native sectors (%llu) is smaller than sectors (%llu)\n",
1391 (unsigned long long)native_sectors,
1392 (unsigned long long)sectors);
1393 return 0;
1394 }
1395
1396 /* let's unlock HPA */
1397 rc = ata_set_max_sectors(dev, native_sectors);
1398 if (rc == -EACCES) {
1399 /* if device aborted the command, skip HPA resizing */
1400 ata_dev_warn(dev,
1401 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1402 (unsigned long long)sectors,
1403 (unsigned long long)native_sectors);
1404 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1405 return 0;
1406 } else if (rc)
1407 return rc;
1408
1409 /* re-read IDENTIFY data */
1410 rc = ata_dev_reread_id(dev, 0);
1411 if (rc) {
1412 ata_dev_err(dev,
1413 "failed to re-read IDENTIFY data after HPA resizing\n");
1414 return rc;
1415 }
1416
1417 if (print_info) {
1418 u64 new_sectors = ata_id_n_sectors(dev->id);
1419 ata_dev_info(dev,
1420 "HPA unlocked: %llu -> %llu, native %llu\n",
1421 (unsigned long long)sectors,
1422 (unsigned long long)new_sectors,
1423 (unsigned long long)native_sectors);
1424 }
1425
1426 return 0;
1427 }
1428
1429 /**
1430 * ata_dump_id - IDENTIFY DEVICE info debugging output
1431 * @id: IDENTIFY DEVICE page to dump
1432 *
1433 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1434 * page.
1435 *
1436 * LOCKING:
1437 * caller.
1438 */
1439
1440 static inline void ata_dump_id(const u16 *id)
1441 {
1442 DPRINTK("49==0x%04x "
1443 "53==0x%04x "
1444 "63==0x%04x "
1445 "64==0x%04x "
1446 "75==0x%04x \n",
1447 id[49],
1448 id[53],
1449 id[63],
1450 id[64],
1451 id[75]);
1452 DPRINTK("80==0x%04x "
1453 "81==0x%04x "
1454 "82==0x%04x "
1455 "83==0x%04x "
1456 "84==0x%04x \n",
1457 id[80],
1458 id[81],
1459 id[82],
1460 id[83],
1461 id[84]);
1462 DPRINTK("88==0x%04x "
1463 "93==0x%04x\n",
1464 id[88],
1465 id[93]);
1466 }
1467
1468 /**
1469 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1470 * @id: IDENTIFY data to compute xfer mask from
1471 *
1472 * Compute the xfermask for this device. This is not as trivial
1473 * as it seems if we must consider early devices correctly.
1474 *
1475 * FIXME: pre IDE drive timing (do we care ?).
1476 *
1477 * LOCKING:
1478 * None.
1479 *
1480 * RETURNS:
1481 * Computed xfermask
1482 */
1483 unsigned long ata_id_xfermask(const u16 *id)
1484 {
1485 unsigned long pio_mask, mwdma_mask, udma_mask;
1486
1487 /* Usual case. Word 53 indicates word 64 is valid */
1488 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1489 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1490 pio_mask <<= 3;
1491 pio_mask |= 0x7;
1492 } else {
1493 /* If word 64 isn't valid then Word 51 high byte holds
1494 * the PIO timing number for the maximum. Turn it into
1495 * a mask.
1496 */
1497 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1498 if (mode < 5) /* Valid PIO range */
1499 pio_mask = (2 << mode) - 1;
1500 else
1501 pio_mask = 1;
1502
1503 /* But wait... there's more. Design your standards by
1504 * committee and you too can get a free iordy field to
1505 * process. However, it's the speeds, not the modes, that
1506 * are supported... Note that drivers using the timing API
1507 * will get this right anyway.
1508 */
1509 }
1510
1511 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1512
1513 if (ata_id_is_cfa(id)) {
1514 /*
1515 * Process compact flash extended modes
1516 */
1517 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1518 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1519
1520 if (pio)
1521 pio_mask |= (1 << 5);
1522 if (pio > 1)
1523 pio_mask |= (1 << 6);
1524 if (dma)
1525 mwdma_mask |= (1 << 3);
1526 if (dma > 1)
1527 mwdma_mask |= (1 << 4);
1528 }
1529
1530 udma_mask = 0;
1531 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1532 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1533
1534 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1535 }
1536 EXPORT_SYMBOL_GPL(ata_id_xfermask);
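
/*
 * Illustrative sketch: device configuration conceptually unpacks this
 * straight into the per-device mode masks:
 *
 *	unsigned long xfer_mask = ata_id_xfermask(dev->id);
 *
 *	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
 *			    &dev->mwdma_mask, &dev->udma_mask);
 */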
1537
1538 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1539 {
1540 struct completion *waiting = qc->private_data;
1541
1542 complete(waiting);
1543 }
1544
1545 /**
1546 * ata_exec_internal_sg - execute libata internal command
1547 * @dev: Device to which the command is sent
1548 * @tf: Taskfile registers for the command and the result
1549 * @cdb: CDB for packet command
1550 * @dma_dir: Data transfer direction of the command
1551 * @sgl: sg list for the data buffer of the command
1552 * @n_elem: Number of sg entries
1553 * @timeout: Timeout in msecs (0 for default)
1554 *
1555 * Executes libata internal command with timeout. @tf contains
1556 * command on entry and result on return. Timeout and error
1557 * conditions are reported via return value. No recovery action
1558 * is taken after a command times out. It's the caller's duty to
1559 * clean up after timeout.
1560 *
1561 * LOCKING:
1562 * None. Should be called with kernel context, might sleep.
1563 *
1564 * RETURNS:
1565 * Zero on success, AC_ERR_* mask on failure
1566 */
1567 unsigned ata_exec_internal_sg(struct ata_device *dev,
1568 struct ata_taskfile *tf, const u8 *cdb,
1569 int dma_dir, struct scatterlist *sgl,
1570 unsigned int n_elem, unsigned long timeout)
1571 {
1572 struct ata_link *link = dev->link;
1573 struct ata_port *ap = link->ap;
1574 u8 command = tf->command;
1575 int auto_timeout = 0;
1576 struct ata_queued_cmd *qc;
1577 unsigned int preempted_tag;
1578 u32 preempted_sactive;
1579 u64 preempted_qc_active;
1580 int preempted_nr_active_links;
1581 DECLARE_COMPLETION_ONSTACK(wait);
1582 unsigned long flags;
1583 unsigned int err_mask;
1584 int rc;
1585
1586 spin_lock_irqsave(ap->lock, flags);
1587
1588 /* no internal command while frozen */
1589 if (ap->pflags & ATA_PFLAG_FROZEN) {
1590 spin_unlock_irqrestore(ap->lock, flags);
1591 return AC_ERR_SYSTEM;
1592 }
1593
1594 /* initialize internal qc */
1595 qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
1596
1597 qc->tag = ATA_TAG_INTERNAL;
1598 qc->hw_tag = 0;
1599 qc->scsicmd = NULL;
1600 qc->ap = ap;
1601 qc->dev = dev;
1602 ata_qc_reinit(qc);
1603
1604 preempted_tag = link->active_tag;
1605 preempted_sactive = link->sactive;
1606 preempted_qc_active = ap->qc_active;
1607 preempted_nr_active_links = ap->nr_active_links;
1608 link->active_tag = ATA_TAG_POISON;
1609 link->sactive = 0;
1610 ap->qc_active = 0;
1611 ap->nr_active_links = 0;
1612
1613 /* prepare & issue qc */
1614 qc->tf = *tf;
1615 if (cdb)
1616 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1617
1618 /* some SATA bridges need us to indicate data xfer direction */
1619 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1620 dma_dir == DMA_FROM_DEVICE)
1621 qc->tf.feature |= ATAPI_DMADIR;
1622
1623 qc->flags |= ATA_QCFLAG_RESULT_TF;
1624 qc->dma_dir = dma_dir;
1625 if (dma_dir != DMA_NONE) {
1626 unsigned int i, buflen = 0;
1627 struct scatterlist *sg;
1628
1629 for_each_sg(sgl, sg, n_elem, i)
1630 buflen += sg->length;
1631
1632 ata_sg_init(qc, sgl, n_elem);
1633 qc->nbytes = buflen;
1634 }
1635
1636 qc->private_data = &wait;
1637 qc->complete_fn = ata_qc_complete_internal;
1638
1639 ata_qc_issue(qc);
1640
1641 spin_unlock_irqrestore(ap->lock, flags);
1642
1643 if (!timeout) {
1644 if (ata_probe_timeout)
1645 timeout = ata_probe_timeout * 1000;
1646 else {
1647 timeout = ata_internal_cmd_timeout(dev, command);
1648 auto_timeout = 1;
1649 }
1650 }
1651
1652 if (ap->ops->error_handler)
1653 ata_eh_release(ap);
1654
1655 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1656
1657 if (ap->ops->error_handler)
1658 ata_eh_acquire(ap);
1659
1660 ata_sff_flush_pio_task(ap);
1661
1662 if (!rc) {
1663 spin_lock_irqsave(ap->lock, flags);
1664
1665 /* We're racing with irq here. If we lose, the
1666 * following test prevents us from completing the qc
1667 * twice. If we win, the port is frozen and will be
1668 * cleaned up by ->post_internal_cmd().
1669 */
1670 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1671 qc->err_mask |= AC_ERR_TIMEOUT;
1672
1673 if (ap->ops->error_handler)
1674 ata_port_freeze(ap);
1675 else
1676 ata_qc_complete(qc);
1677
1678 if (ata_msg_warn(ap))
1679 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1680 command);
1681 }
1682
1683 spin_unlock_irqrestore(ap->lock, flags);
1684 }
1685
1686 /* do post_internal_cmd */
1687 if (ap->ops->post_internal_cmd)
1688 ap->ops->post_internal_cmd(qc);
1689
1690 /* perform minimal error analysis */
1691 if (qc->flags & ATA_QCFLAG_FAILED) {
1692 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1693 qc->err_mask |= AC_ERR_DEV;
1694
1695 if (!qc->err_mask)
1696 qc->err_mask |= AC_ERR_OTHER;
1697
1698 if (qc->err_mask & ~AC_ERR_OTHER)
1699 qc->err_mask &= ~AC_ERR_OTHER;
1700 } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1701 qc->result_tf.command |= ATA_SENSE;
1702 }
1703
1704 /* finish up */
1705 spin_lock_irqsave(ap->lock, flags);
1706
1707 *tf = qc->result_tf;
1708 err_mask = qc->err_mask;
1709
1710 ata_qc_free(qc);
1711 link->active_tag = preempted_tag;
1712 link->sactive = preempted_sactive;
1713 ap->qc_active = preempted_qc_active;
1714 ap->nr_active_links = preempted_nr_active_links;
1715
1716 spin_unlock_irqrestore(ap->lock, flags);
1717
1718 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1719 ata_internal_cmd_timed_out(dev, command);
1720
1721 return err_mask;
1722 }
1723
1724 /**
1725 * ata_exec_internal - execute libata internal command
1726 * @dev: Device to which the command is sent
1727 * @tf: Taskfile registers for the command and the result
1728 * @cdb: CDB for packet command
1729 * @dma_dir: Data transfer direction of the command
1730 * @buf: Data buffer of the command
1731 * @buflen: Length of data buffer
1732 * @timeout: Timeout in msecs (0 for default)
1733 *
1734 * Wrapper around ata_exec_internal_sg() which takes simple
1735 * buffer instead of sg list.
1736 *
1737 * LOCKING:
1738 * None. Should be called with kernel context, might sleep.
1739 *
1740 * RETURNS:
1741 * Zero on success, AC_ERR_* mask on failure
1742 */
1743 unsigned ata_exec_internal(struct ata_device *dev,
1744 struct ata_taskfile *tf, const u8 *cdb,
1745 int dma_dir, void *buf, unsigned int buflen,
1746 unsigned long timeout)
1747 {
1748 struct scatterlist *psg = NULL, sg;
1749 unsigned int n_elem = 0;
1750
1751 if (dma_dir != DMA_NONE) {
1752 WARN_ON(!buf);
1753 sg_init_one(&sg, buf, buflen);
1754 psg = &sg;
1755 n_elem++;
1756 }
1757
1758 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1759 timeout);
1760 }
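
/*
 * Illustrative sketch: issuing a simple non-data command through the
 * internal path, in the same style as the cache-flush helpers:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_FLUSH_EXT;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	tf.protocol = ATA_PROT_NODATA;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
 */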
1761
1762 /**
1763 * ata_pio_need_iordy - check if iordy needed
1764 * @adev: ATA device
1765 *
1766 * Check if the current speed of the device requires IORDY. Used
1767 * by various controllers for chip configuration.
1768 */
1769 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1770 {
1771 /* Don't set IORDY if we're preparing for reset. IORDY may
1772 * lead to controller lock up on certain controllers if the
1773 * port is not occupied. See bko#11703 for details.
1774 */
1775 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1776 return 0;
1777 /* Controller doesn't support IORDY. Probably a pointless
1778 * check as the caller should know this.
1779 */
1780 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1781 return 0;
1782 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1783 if (ata_id_is_cfa(adev->id)
1784 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1785 return 0;
1786 /* For PIO3 and higher IORDY is mandatory */
1787 if (adev->pio_mode > XFER_PIO_2)
1788 return 1;
1789 /* We turn it on when possible */
1790 if (ata_id_has_iordy(adev->id))
1791 return 1;
1792 return 0;
1793 }
1794 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
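
/*
 * Illustrative sketch (IORDY_EN is a hypothetical chip-specific bit): a
 * PATA timing driver typically folds this check into mode programming:
 *
 *	if (ata_pio_need_iordy(adev))
 *		ctrl |= IORDY_EN;
 */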
1795
1796 /**
1797 * ata_pio_mask_no_iordy - Return the non IORDY mask
1798 * @adev: ATA device
1799 *
1800 * Compute the mask of PIO modes usable if we are not using iordy,
1801 * based on the drive's reported non-IORDY cycle time if any.
1802 */
1803 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1804 {
1805 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1806 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1807 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1808 /* Is the speed faster than the drive allows non IORDY ? */
1809 if (pio) {
1810 /* This is a cycle time, not a frequency - watch the logic! */
1811 if (pio > 240) /* PIO2 is 240nS per cycle */
1812 return 3 << ATA_SHIFT_PIO;
1813 return 7 << ATA_SHIFT_PIO;
1814 }
1815 }
1816 return 3 << ATA_SHIFT_PIO;
1817 }
1818
1819 /**
1820 * ata_do_dev_read_id - default ID read method
1821 * @dev: device
1822 * @tf: proposed taskfile
1823 * @id: data buffer
1824 *
1825 * Issue the identify taskfile and hand back the buffer containing
1826 * identify data. For some RAID controllers and for pre-ATA devices
1827 * this function is wrapped or replaced by the driver.
1828 */
1829 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1830 struct ata_taskfile *tf, u16 *id)
1831 {
1832 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1833 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1834 }
1835 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
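
/*
 * Illustrative sketch: a controller driver can wrap the default to
 * patch up the returned data; my_read_id()/my_fixup_id() here are
 * hypothetical:
 *
 *	static unsigned int my_read_id(struct ata_device *dev,
 *				       struct ata_taskfile *tf, u16 *id)
 *	{
 *		unsigned int err_mask = ata_do_dev_read_id(dev, tf, id);
 *
 *		if (!err_mask)
 *			my_fixup_id(id);
 *		return err_mask;
 *	}
 *
 * hooked up via ata_port_operations.read_id.
 */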
1836
1837 /**
1838 * ata_dev_read_id - Read ID data from the specified device
1839 * @dev: target device
1840 * @p_class: pointer to class of the target device (may be changed)
1841 * @flags: ATA_READID_* flags
1842 * @id: buffer to read IDENTIFY data into
1843 *
1844 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1845 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1846 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1847 * for pre-ATA4 drives.
1848 *
1849 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1850 * now we abort if we hit that case.
1851 *
1852 * LOCKING:
1853 * Kernel thread context (may sleep)
1854 *
1855 * RETURNS:
1856 * 0 on success, -errno otherwise.
1857 */
1858 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1859 unsigned int flags, u16 *id)
1860 {
1861 struct ata_port *ap = dev->link->ap;
1862 unsigned int class = *p_class;
1863 struct ata_taskfile tf;
1864 unsigned int err_mask = 0;
1865 const char *reason;
1866 bool is_semb = class == ATA_DEV_SEMB;
1867 int may_fallback = 1, tried_spinup = 0;
1868 int rc;
1869
1870 if (ata_msg_ctl(ap))
1871 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1872
1873 retry:
1874 ata_tf_init(dev, &tf);
1875
1876 switch (class) {
1877 case ATA_DEV_SEMB:
1878 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1879 /* fall through */
1880 case ATA_DEV_ATA:
1881 case ATA_DEV_ZAC:
1882 tf.command = ATA_CMD_ID_ATA;
1883 break;
1884 case ATA_DEV_ATAPI:
1885 tf.command = ATA_CMD_ID_ATAPI;
1886 break;
1887 default:
1888 rc = -ENODEV;
1889 reason = "unsupported class";
1890 goto err_out;
1891 }
1892
1893 tf.protocol = ATA_PROT_PIO;
1894
1895 /* Some devices choke if TF registers contain garbage. Make
1896 * sure those are properly initialized.
1897 */
1898 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1899
1900 /* Device presence detection is unreliable on some
1901 * controllers. Always poll IDENTIFY if available.
1902 */
1903 tf.flags |= ATA_TFLAG_POLLING;
1904
1905 if (ap->ops->read_id)
1906 err_mask = ap->ops->read_id(dev, &tf, id);
1907 else
1908 err_mask = ata_do_dev_read_id(dev, &tf, id);
1909
1910 if (err_mask) {
1911 if (err_mask & AC_ERR_NODEV_HINT) {
1912 ata_dev_dbg(dev, "NODEV after polling detection\n");
1913 return -ENOENT;
1914 }
1915
1916 if (is_semb) {
1917 ata_dev_info(dev,
1918 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1919 /* SEMB is not supported yet */
1920 *p_class = ATA_DEV_SEMB_UNSUP;
1921 return 0;
1922 }
1923
1924 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1925 /* Device or controller might have reported
1926 * the wrong device class. Give a shot at the
1927 * other IDENTIFY if the current one is
1928 * aborted by the device.
1929 */
1930 if (may_fallback) {
1931 may_fallback = 0;
1932
1933 if (class == ATA_DEV_ATA)
1934 class = ATA_DEV_ATAPI;
1935 else
1936 class = ATA_DEV_ATA;
1937 goto retry;
1938 }
1939
1940 /* Control reaches here iff the device aborted
1941 * both flavors of IDENTIFYs which happens
1942 * sometimes with phantom devices.
1943 */
1944 ata_dev_dbg(dev,
1945 "both IDENTIFYs aborted, assuming NODEV\n");
1946 return -ENOENT;
1947 }
1948
1949 rc = -EIO;
1950 reason = "I/O error";
1951 goto err_out;
1952 }
1953
1954 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1955 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1956 "class=%d may_fallback=%d tried_spinup=%d\n",
1957 class, may_fallback, tried_spinup);
1958 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1959 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1960 }
1961
1962 /* Falling back doesn't make sense if ID data was read
1963 * successfully at least once.
1964 */
1965 may_fallback = 0;
1966
1967 swap_buf_le16(id, ATA_ID_WORDS);
1968
1969 /* sanity check */
1970 rc = -EINVAL;
1971 reason = "device reports invalid type";
1972
1973 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1974 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1975 goto err_out;
1976 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1977 ata_id_is_ata(id)) {
1978 ata_dev_dbg(dev,
1979 "host indicates ignore ATA devices, ignored\n");
1980 return -ENOENT;
1981 }
1982 } else {
1983 if (ata_id_is_ata(id))
1984 goto err_out;
1985 }
1986
1987 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1988 tried_spinup = 1;
1989 /*
1990 * Drive powered-up in standby mode, and requires a specific
1991 * SET_FEATURES spin-up subcommand before it will accept
1992 * anything other than the original IDENTIFY command.
1993 */
1994 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1995 if (err_mask && id[2] != 0x738c) {
1996 rc = -EIO;
1997 reason = "SPINUP failed";
1998 goto err_out;
1999 }
2000 /*
2001 * If the drive initially returned incomplete IDENTIFY info,
2002 * we now must reissue the IDENTIFY command.
2003 */
2004 if (id[2] == 0x37c8)
2005 goto retry;
2006 }
2007
2008 if ((flags & ATA_READID_POSTRESET) &&
2009 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2010 /*
2011 * The exact sequence expected by certain pre-ATA4 drives is:
2012 * SRST RESET
2013 * IDENTIFY (optional in early ATA)
2014 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2015 * anything else..
2016 * Some drives were very specific about that exact sequence.
2017 *
2018 * Note that ATA4 says lba is mandatory so the second check
2019 * should never trigger.
2020 */
2021 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2022 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2023 if (err_mask) {
2024 rc = -EIO;
2025 reason = "INIT_DEV_PARAMS failed";
2026 goto err_out;
2027 }
2028
2029 /* current CHS translation info (id[53-58]) might have
2030 * changed. Reread the identify device info.
2031 */
2032 flags &= ~ATA_READID_POSTRESET;
2033 goto retry;
2034 }
2035 }
2036
2037 *p_class = class;
2038
2039 return 0;
2040
2041 err_out:
2042 if (ata_msg_warn(ap))
2043 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
2044 reason, err_mask);
2045 return rc;
2046 }
2047
2048 /**
2049 * ata_read_log_page - read a specific log page
2050 * @dev: target device
2051 * @log: log to read
2052 * @page: page to read
2053 * @buf: buffer to store read page
2054 * @sectors: number of sectors to read
2055 *
2056 * Read log page using READ_LOG_EXT command.
2057 *
2058 * LOCKING:
2059 * Kernel thread context (may sleep).
2060 *
2061 * RETURNS:
2062 * 0 on success, AC_ERR_* mask otherwise.
2063 */
2064 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
2065 u8 page, void *buf, unsigned int sectors)
2066 {
2067 unsigned long ap_flags = dev->link->ap->flags;
2068 struct ata_taskfile tf;
2069 unsigned int err_mask;
2070 bool dma = false;
2071
2072 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
2073
2074 /*
2075 * Return error without actually issuing the command on controllers
2076 * which e.g. lockup on a read log page.
2077 */
2078 if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2079 return AC_ERR_DEV;
2080
2081 retry:
2082 ata_tf_init(dev, &tf);
2083 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
2084 !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
2085 tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2086 tf.protocol = ATA_PROT_DMA;
2087 dma = true;
2088 } else {
2089 tf.command = ATA_CMD_READ_LOG_EXT;
2090 tf.protocol = ATA_PROT_PIO;
2091 dma = false;
2092 }
2093 tf.lbal = log;
2094 tf.lbam = page;
2095 tf.nsect = sectors;
2096 tf.hob_nsect = sectors >> 8;
2097 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2098
2099 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2100 buf, sectors * ATA_SECT_SIZE, 0);
2101
2102 if (err_mask && dma) {
2103 dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2104 ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
2105 goto retry;
2106 }
2107
2108 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2109 return err_mask;
2110 }
2111
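/*
 * Illustrative note: the General Purpose Log Directory (log address 0,
 * read below) holds one little-endian word per log address giving the
 * number of pages in that log, so a non-zero entry at word @log means
 * the log is supported.
 */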
2112 static bool ata_log_supported(struct ata_device *dev, u8 log)
2113 {
2114 struct ata_port *ap = dev->link->ap;
2115
2116 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2117 return false;
2118 return get_unaligned_le16(&ap->sector_buf[log * 2]);
2119 }
2120
2121 static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2122 {
2123 struct ata_port *ap = dev->link->ap;
2124 unsigned int err, i;
2125
2126 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2127 ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
2128 return false;
2129 }
2130
2131 /*
2132 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2133 * supported.
2134 */
2135 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2136 1);
2137 if (err) {
2138 ata_dev_info(dev,
2139 "failed to get Device Identify Log Emask 0x%x\n",
2140 err);
2141 return false;
2142 }
2143
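/* Page 0 of the Identify Device data log lists the supported
 * pages: byte 8 holds the number of entries and the page numbers
 * themselves follow from byte 9 onwards.
 */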
2144 for (i = 0; i < ap->sector_buf[8]; i++) {
2145 if (ap->sector_buf[9 + i] == page)
2146 return true;
2147 }
2148
2149 return false;
2150 }
2151
2152 static int ata_do_link_spd_horkage(struct ata_device *dev)
2153 {
2154 struct ata_link *plink = ata_dev_phys_link(dev);
2155 u32 target, target_limit;
2156
2157 if (!sata_scr_valid(plink))
2158 return 0;
2159
2160 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2161 target = 1;
2162 else
2163 return 0;
2164
2165 target_limit = (1 << target) - 1;
2166
2167 /* if already on stricter limit, no need to push further */
2168 if (plink->sata_spd_limit <= target_limit)
2169 return 0;
2170
2171 plink->sata_spd_limit = target_limit;
2172
2173 /* Request another EH round by returning -EAGAIN if link is
2174 * going faster than the target speed. Forward progress is
2175 * guaranteed by setting sata_spd_limit to target_limit above.
2176 */
2177 if (plink->sata_spd > target) {
2178 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2179 sata_spd_string(target));
2180 return -EAGAIN;
2181 }
2182 return 0;
2183 }
2184
2185 static inline u8 ata_dev_knobble(struct ata_device *dev)
2186 {
2187 struct ata_port *ap = dev->link->ap;
2188
2189 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2190 return 0;
2191
2192 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2193 }
2194
2195 static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2196 {
2197 struct ata_port *ap = dev->link->ap;
2198 unsigned int err_mask;
2199
2200 if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2201 ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2202 return;
2203 }
2204 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2205 0, ap->sector_buf, 1);
2206 if (err_mask) {
2207 ata_dev_dbg(dev,
2208 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2209 err_mask);
2210 } else {
2211 u8 *cmds = dev->ncq_send_recv_cmds;
2212
2213 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2214 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2215
2216 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2217 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2218 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2219 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2220 }
2221 }
2222 }
2223
2224 static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2225 {
2226 struct ata_port *ap = dev->link->ap;
2227 unsigned int err_mask;
2228
2229 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2230 ata_dev_warn(dev,
2231 "NCQ Non-Data Log not supported\n");
2232 return;
2233 }
2234 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2235 0, ap->sector_buf, 1);
2236 if (err_mask) {
2237 ata_dev_dbg(dev,
2238 "failed to get NCQ Non-Data Log Emask 0x%x\n",
2239 err_mask);
2240 } else {
2241 u8 *cmds = dev->ncq_non_data_cmds;
2242
2243 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2244 }
2245 }
2246
2247 static void ata_dev_config_ncq_prio(struct ata_device *dev)
2248 {
2249 struct ata_port *ap = dev->link->ap;
2250 unsigned int err_mask;
2251
2252 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2253 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2254 return;
2255 }
2256
2257 err_mask = ata_read_log_page(dev,
2258 ATA_LOG_IDENTIFY_DEVICE,
2259 ATA_LOG_SATA_SETTINGS,
2260 ap->sector_buf,
2261 1);
2262 if (err_mask) {
2263 ata_dev_dbg(dev,
2264 "failed to get Identify Device data, Emask 0x%x\n",
2265 err_mask);
2266 return;
2267 }
2268
2269 if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
2270 dev->flags |= ATA_DFLAG_NCQ_PRIO;
2271 } else {
2272 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2273 ata_dev_dbg(dev, "SATA page does not support priority\n");
2274 }
2275
2276 }
2277
2278 static int ata_dev_config_ncq(struct ata_device *dev,
2279 char *desc, size_t desc_sz)
2280 {
2281 struct ata_port *ap = dev->link->ap;
2282 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2283 unsigned int err_mask;
2284 char *aa_desc = "";
2285
2286 if (!ata_id_has_ncq(dev->id)) {
2287 desc[0] = '\0';
2288 return 0;
2289 }
2290 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2291 snprintf(desc, desc_sz, "NCQ (not used)");
2292 return 0;
2293 }
2294 if (ap->flags & ATA_FLAG_NCQ) {
2295 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
2296 dev->flags |= ATA_DFLAG_NCQ;
2297 }
2298
2299 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2300 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2301 ata_id_has_fpdma_aa(dev->id)) {
2302 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2303 SATA_FPDMA_AA);
2304 if (err_mask) {
2305 ata_dev_err(dev,
2306 "failed to enable AA (error_mask=0x%x)\n",
2307 err_mask);
2308 if (err_mask != AC_ERR_DEV) {
2309 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2310 return -EIO;
2311 }
2312 } else
2313 aa_desc = ", AA";
2314 }
2315
2316 if (hdepth >= ddepth)
2317 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2318 else
2319 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2320 ddepth, aa_desc);
2321
2322 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2323 if (ata_id_has_ncq_send_and_recv(dev->id))
2324 ata_dev_config_ncq_send_recv(dev);
2325 if (ata_id_has_ncq_non_data(dev->id))
2326 ata_dev_config_ncq_non_data(dev);
2327 if (ata_id_has_ncq_prio(dev->id))
2328 ata_dev_config_ncq_prio(dev);
2329 }
2330
2331 return 0;
2332 }
2333
2334 static void ata_dev_config_sense_reporting(struct ata_device *dev)
2335 {
2336 unsigned int err_mask;
2337
2338 if (!ata_id_has_sense_reporting(dev->id))
2339 return;
2340
2341 if (ata_id_sense_reporting_enabled(dev->id))
2342 return;
2343
2344 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2345 if (err_mask) {
2346 ata_dev_dbg(dev,
2347 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2348 err_mask);
2349 }
2350 }
2351
2352 static void ata_dev_config_zac(struct ata_device *dev)
2353 {
2354 struct ata_port *ap = dev->link->ap;
2355 unsigned int err_mask;
2356 u8 *identify_buf = ap->sector_buf;
2357
2358 dev->zac_zones_optimal_open = U32_MAX;
2359 dev->zac_zones_optimal_nonseq = U32_MAX;
2360 dev->zac_zones_max_open = U32_MAX;
2361
2362 /*
2363 * Always set the 'ZAC' flag for Host-managed devices.
2364 */
2365 if (dev->class == ATA_DEV_ZAC)
2366 dev->flags |= ATA_DFLAG_ZAC;
2367 else if (ata_id_zoned_cap(dev->id) == 0x01)
2368 /*
2369 * Check for host-aware devices.
2370 */
2371 dev->flags |= ATA_DFLAG_ZAC;
2372
2373 if (!(dev->flags & ATA_DFLAG_ZAC))
2374 return;
2375
2376 if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2377 ata_dev_warn(dev,
2378 "ATA Zoned Information Log not supported\n");
2379 return;
2380 }
2381
2382 /*
2383 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2384 */
2385 err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2386 ATA_LOG_ZONED_INFORMATION,
2387 identify_buf, 1);
2388 if (!err_mask) {
2389 u64 zoned_cap, opt_open, opt_nonseq, max_open;
2390
2391 zoned_cap = get_unaligned_le64(&identify_buf[8]);
2392 if ((zoned_cap >> 63))
2393 dev->zac_zoned_cap = (zoned_cap & 1);
2394 opt_open = get_unaligned_le64(&identify_buf[24]);
2395 if ((opt_open >> 63))
2396 dev->zac_zones_optimal_open = (u32)opt_open;
2397 opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2398 if ((opt_nonseq >> 63))
2399 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2400 max_open = get_unaligned_le64(&identify_buf[40]);
2401 if ((max_open >> 63))
2402 dev->zac_zones_max_open = (u32)max_open;
2403 }
2404 }
2405
2406 static void ata_dev_config_trusted(struct ata_device *dev)
2407 {
2408 struct ata_port *ap = dev->link->ap;
2409 u64 trusted_cap;
2410 unsigned int err;
2411
2412 if (!ata_id_has_trusted(dev->id))
2413 return;
2414
2415 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2416 ata_dev_warn(dev,
2417 "Security Log not supported\n");
2418 return;
2419 }
2420
2421 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2422 ap->sector_buf, 1);
2423 if (err) {
2424 ata_dev_dbg(dev,
2425 "failed to read Security Log, Emask 0x%x\n", err);
2426 return;
2427 }
2428
2429 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2430 if (!(trusted_cap & (1ULL << 63))) {
2431 ata_dev_dbg(dev,
2432 "Trusted Computing capability qword not valid!\n");
2433 return;
2434 }
2435
2436 if (trusted_cap & (1 << 0))
2437 dev->flags |= ATA_DFLAG_TRUSTED;
2438 }
2439
2440 /**
2441 * ata_dev_configure - Configure the specified ATA/ATAPI device
2442 * @dev: Target device to configure
2443 *
2444 * Configure @dev according to @dev->id. Generic and low-level
2445 * driver-specific fixups are also applied.
2446 *
2447 * LOCKING:
2448 * Kernel thread context (may sleep)
2449 *
2450 * RETURNS:
2451 * 0 on success, -errno otherwise
2452 */
2453 int ata_dev_configure(struct ata_device *dev)
2454 {
2455 struct ata_port *ap = dev->link->ap;
2456 struct ata_eh_context *ehc = &dev->link->eh_context;
2457 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2458 const u16 *id = dev->id;
2459 unsigned long xfer_mask;
2460 unsigned int err_mask;
2461 char revbuf[7]; /* XYZ-99\0 */
2462 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2463 char modelbuf[ATA_ID_PROD_LEN+1];
2464 int rc;
2465
2466 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2467 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2468 return 0;
2469 }
2470
2471 if (ata_msg_probe(ap))
2472 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2473
2474 /* set horkage */
2475 dev->horkage |= ata_dev_blacklisted(dev);
2476 ata_force_horkage(dev);
2477
2478 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2479 ata_dev_info(dev, "unsupported device, disabling\n");
2480 ata_dev_disable(dev);
2481 return 0;
2482 }
2483
2484 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2485 dev->class == ATA_DEV_ATAPI) {
2486 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2487 atapi_enabled ? "not supported with this driver"
2488 : "disabled");
2489 ata_dev_disable(dev);
2490 return 0;
2491 }
2492
2493 rc = ata_do_link_spd_horkage(dev);
2494 if (rc)
2495 return rc;
2496
2497 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2498 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2499 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2500 dev->horkage |= ATA_HORKAGE_NOLPM;
2501
2502 if (ap->flags & ATA_FLAG_NO_LPM)
2503 dev->horkage |= ATA_HORKAGE_NOLPM;
2504
2505 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2506 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2507 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2508 }
2509
2510 /* let ACPI work its magic */
2511 rc = ata_acpi_on_devcfg(dev);
2512 if (rc)
2513 return rc;
2514
2515 /* massage HPA, do it early as it might change IDENTIFY data */
2516 rc = ata_hpa_resize(dev);
2517 if (rc)
2518 return rc;
2519
2520 /* print device capabilities */
2521 if (ata_msg_probe(ap))
2522 ata_dev_dbg(dev,
2523 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2524 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2525 __func__,
2526 id[49], id[82], id[83], id[84],
2527 id[85], id[86], id[87], id[88]);
2528
2529 /* initialize to-be-configured parameters */
2530 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2531 dev->max_sectors = 0;
2532 dev->cdb_len = 0;
2533 dev->n_sectors = 0;
2534 dev->cylinders = 0;
2535 dev->heads = 0;
2536 dev->sectors = 0;
2537 dev->multi_count = 0;
2538
2539 /*
2540 * common ATA, ATAPI feature tests
2541 */
2542
2543 /* find max transfer mode; for printk only */
2544 xfer_mask = ata_id_xfermask(id);
2545
2546 if (ata_msg_probe(ap))
2547 ata_dump_id(id);
2548
2549 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2550 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2551 sizeof(fwrevbuf));
2552
2553 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2554 sizeof(modelbuf));
2555
2556 /* ATA-specific feature tests */
2557 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2558 if (ata_id_is_cfa(id)) {
2559 /* CPRM may make this media unusable */
2560 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2561 ata_dev_warn(dev,
2562 "supports DRM functions and may not be fully accessible\n");
2563 snprintf(revbuf, 7, "CFA");
2564 } else {
2565 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2566 /* Warn the user if the device has TPM extensions */
2567 if (ata_id_has_tpm(id))
2568 ata_dev_warn(dev,
2569 "supports DRM functions and may not be fully accessible\n");
2570 }
2571
2572 dev->n_sectors = ata_id_n_sectors(id);
2573
2574 /* get current R/W Multiple count setting */
2575 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2576 unsigned int max = dev->id[47] & 0xff;
2577 unsigned int cnt = dev->id[59] & 0xff;
2578 /* only recognize/allow powers of two here */
2579 if (is_power_of_2(max) && is_power_of_2(cnt))
2580 if (cnt <= max)
2581 dev->multi_count = cnt;
2582 }
2583
2584 if (ata_id_has_lba(id)) {
2585 const char *lba_desc;
2586 char ncq_desc[24];
2587
2588 lba_desc = "LBA";
2589 dev->flags |= ATA_DFLAG_LBA;
2590 if (ata_id_has_lba48(id)) {
2591 dev->flags |= ATA_DFLAG_LBA48;
2592 lba_desc = "LBA48";
2593
2594 if (dev->n_sectors >= (1UL << 28) &&
2595 ata_id_has_flush_ext(id))
2596 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2597 }
2598
2599 /* config NCQ */
2600 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2601 if (rc)
2602 return rc;
2603
2604 /* print device info to dmesg */
2605 if (ata_msg_drv(ap) && print_info) {
2606 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2607 revbuf, modelbuf, fwrevbuf,
2608 ata_mode_string(xfer_mask));
2609 ata_dev_info(dev,
2610 "%llu sectors, multi %u: %s %s\n",
2611 (unsigned long long)dev->n_sectors,
2612 dev->multi_count, lba_desc, ncq_desc);
2613 }
2614 } else {
2615 /* CHS */
2616
2617 /* Default translation */
2618 dev->cylinders = id[1];
2619 dev->heads = id[3];
2620 dev->sectors = id[6];
2621
2622 if (ata_id_current_chs_valid(id)) {
2623 /* Current CHS translation is valid. */
2624 dev->cylinders = id[54];
2625 dev->heads = id[55];
2626 dev->sectors = id[56];
2627 }
2628
2629 /* print device info to dmesg */
2630 if (ata_msg_drv(ap) && print_info) {
2631 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2632 revbuf, modelbuf, fwrevbuf,
2633 ata_mode_string(xfer_mask));
2634 ata_dev_info(dev,
2635 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2636 (unsigned long long)dev->n_sectors,
2637 dev->multi_count, dev->cylinders,
2638 dev->heads, dev->sectors);
2639 }
2640 }
2641
2642 /* Check and mark DevSlp capability. Get DevSlp timing variables
2643 * from SATA Settings page of Identify Device Data Log.
2644 */
2645 if (ata_id_has_devslp(dev->id)) {
2646 u8 *sata_setting = ap->sector_buf;
2647 int i, j;
2648
2649 dev->flags |= ATA_DFLAG_DEVSLP;
2650 err_mask = ata_read_log_page(dev,
2651 ATA_LOG_IDENTIFY_DEVICE,
2652 ATA_LOG_SATA_SETTINGS,
2653 sata_setting,
2654 1);
2655 if (err_mask)
2656 ata_dev_dbg(dev,
2657 "failed to get Identify Device Data, Emask 0x%x\n",
2658 err_mask);
2659 else
2660 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2661 j = ATA_LOG_DEVSLP_OFFSET + i;
2662 dev->devslp_timing[i] = sata_setting[j];
2663 }
2664 }
2665 ata_dev_config_sense_reporting(dev);
2666 ata_dev_config_zac(dev);
2667 ata_dev_config_trusted(dev);
2668 dev->cdb_len = 32;
2669 }
2670
2671 /* ATAPI-specific feature tests */
2672 else if (dev->class == ATA_DEV_ATAPI) {
2673 const char *cdb_intr_string = "";
2674 const char *atapi_an_string = "";
2675 const char *dma_dir_string = "";
2676 u32 sntf;
2677
2678 rc = atapi_cdb_len(id);
2679 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2680 if (ata_msg_warn(ap))
2681 ata_dev_warn(dev, "unsupported CDB len\n");
2682 rc = -EINVAL;
2683 goto err_out_nosup;
2684 }
2685 dev->cdb_len = (unsigned int) rc;
2686
2687 /* Enable ATAPI AN if both the host and the device
2688 * support it. If a PMP is attached, SNTF is required
2689 * to enable ATAPI AN to discern between PHY status
2690 * changed notifications and ATAPI ANs.
2691 */
2692 if (atapi_an &&
2693 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2694 (!sata_pmp_attached(ap) ||
2695 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2696 /* issue SET feature command to turn this on */
2697 err_mask = ata_dev_set_feature(dev,
2698 SETFEATURES_SATA_ENABLE, SATA_AN);
2699 if (err_mask)
2700 ata_dev_err(dev,
2701 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2702 err_mask);
2703 else {
2704 dev->flags |= ATA_DFLAG_AN;
2705 atapi_an_string = ", ATAPI AN";
2706 }
2707 }
2708
2709 if (ata_id_cdb_intr(dev->id)) {
2710 dev->flags |= ATA_DFLAG_CDB_INTR;
2711 cdb_intr_string = ", CDB intr";
2712 }
2713
2714 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2715 dev->flags |= ATA_DFLAG_DMADIR;
2716 dma_dir_string = ", DMADIR";
2717 }
2718
2719 if (ata_id_has_da(dev->id)) {
2720 dev->flags |= ATA_DFLAG_DA;
2721 zpodd_init(dev);
2722 }
2723
2724 /* print device info to dmesg */
2725 if (ata_msg_drv(ap) && print_info)
2726 ata_dev_info(dev,
2727 "ATAPI: %s, %s, max %s%s%s%s\n",
2728 modelbuf, fwrevbuf,
2729 ata_mode_string(xfer_mask),
2730 cdb_intr_string, atapi_an_string,
2731 dma_dir_string);
2732 }
2733
2734 /* determine max_sectors */
2735 dev->max_sectors = ATA_MAX_SECTORS;
2736 if (dev->flags & ATA_DFLAG_LBA48)
2737 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2738
2739 /* Limit PATA drives behind a SATA cable bridge to UDMA5
2740 and 200-sector transfers */
2741 if (ata_dev_knobble(dev)) {
2742 if (ata_msg_drv(ap) && print_info)
2743 ata_dev_info(dev, "applying bridge limits\n");
2744 dev->udma_mask &= ATA_UDMA5;
2745 dev->max_sectors = ATA_MAX_SECTORS;
2746 }
2747
2748 if ((dev->class == ATA_DEV_ATAPI) &&
2749 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2750 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2751 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2752 }
2753
2754 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2755 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2756 dev->max_sectors);
2757
2758 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2759 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2760 dev->max_sectors);
2761
2762 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2763 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2764
2765 if (ap->ops->dev_config)
2766 ap->ops->dev_config(dev);
2767
2768 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2769 /* Let the user know. We don't want to disallow opens for
2770 rescue purposes, or in case the vendor is just a blithering
2771 idiot. Do this after the dev_config call as some controllers
2772 with buggy firmware may want to avoid reporting false device
2773 bugs */
2774
2775 if (print_info) {
2776 ata_dev_warn(dev,
2777 "Drive reports diagnostics failure. This may indicate a drive\n");
2778 ata_dev_warn(dev,
2779 "fault or invalid emulation. Contact drive vendor for information.\n");
2780 }
2781 }
2782
2783 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2784 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2785 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2786 }
2787
2788 return 0;
2789
2790 err_out_nosup:
2791 if (ata_msg_probe(ap))
2792 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2793 return rc;
2794 }
2795
2796 /**
2797 * ata_cable_40wire - return 40 wire cable type
2798 * @ap: port
2799 *
2800 * Helper method for drivers which want to hardwire 40 wire cable
2801 * detection.
2802 */
2803
2804 int ata_cable_40wire(struct ata_port *ap)
2805 {
2806 return ATA_CBL_PATA40;
2807 }
2808 EXPORT_SYMBOL_GPL(ata_cable_40wire);
2809
2810 /**
2811 * ata_cable_80wire - return 80 wire cable type
2812 * @ap: port
2813 *
2814 * Helper method for drivers which want to hardwire 80 wire cable
2815 * detection.
2816 */
2817
2818 int ata_cable_80wire(struct ata_port *ap)
2819 {
2820 return ATA_CBL_PATA80;
2821 }
2822 EXPORT_SYMBOL_GPL(ata_cable_80wire);
2823
2824 /**
2825 * ata_cable_unknown - return unknown PATA cable.
2826 * @ap: port
2827 *
2828 * Helper method for drivers which have no PATA cable detection.
2829 */
2830
2831 int ata_cable_unknown(struct ata_port *ap)
2832 {
2833 return ATA_CBL_PATA_UNK;
2834 }
2835 EXPORT_SYMBOL_GPL(ata_cable_unknown);
2836
2837 /**
2838 * ata_cable_ignore - return ignored PATA cable.
2839 * @ap: port
2840 *
2841 * Helper method for drivers which don't use cable type to limit
2842 * transfer mode.
2843 */
2844 int ata_cable_ignore(struct ata_port *ap)
2845 {
2846 return ATA_CBL_PATA_IGN;
2847 }
2848 EXPORT_SYMBOL_GPL(ata_cable_ignore);
2849
2850 /**
2851 * ata_cable_sata - return SATA cable type
2852 * @ap: port
2853 *
2854 * Helper method for drivers which have SATA cables
2855 */
2856
2857 int ata_cable_sata(struct ata_port *ap)
2858 {
2859 return ATA_CBL_SATA;
2860 }
2861 EXPORT_SYMBOL_GPL(ata_cable_sata);
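/*
 * Illustrative sketch, not part of this file: a low-level driver with a
 * known, fixed cable type simply plugs one of the helpers above into its
 * port operations ("example_port_ops" below is hypothetical):
 *
 *	static struct ata_port_operations example_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */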
2862
2863 /**
2864 * ata_bus_probe - Reset and probe ATA bus
2865 * @ap: Bus to probe
2866 *
2867 * Master ATA bus probing function. Initiates a hardware-dependent
2868 * bus reset, then attempts to identify any devices found on
2869 * the bus.
2870 *
2871 * LOCKING:
2872 * PCI/etc. bus probe sem.
2873 *
2874 * RETURNS:
2875 * Zero on success, negative errno otherwise.
2876 */
2877
2878 int ata_bus_probe(struct ata_port *ap)
2879 {
2880 unsigned int classes[ATA_MAX_DEVICES];
2881 int tries[ATA_MAX_DEVICES];
2882 int rc;
2883 struct ata_device *dev;
2884
2885 ata_for_each_dev(dev, &ap->link, ALL)
2886 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2887
2888 retry:
2889 ata_for_each_dev(dev, &ap->link, ALL) {
2890 /* If we issue an SRST then an ATA drive (not ATAPI)
2891 * may change configuration and be in PIO0 timing. If
2892 * we do a hard reset (or are coming from power on)
2893 * this is true for ATA or ATAPI. Until we've set a
2894 * suitable controller mode we should not touch the
2895 * bus as we may be talking too fast.
2896 */
2897 dev->pio_mode = XFER_PIO_0;
2898 dev->dma_mode = 0xff;
2899
2900 /* If the controller has a pio mode setup function
2901 * then use it to set the chipset to rights. Don't
2902 * touch the DMA setup as that will be dealt with when
2903 * configuring devices.
2904 */
2905 if (ap->ops->set_piomode)
2906 ap->ops->set_piomode(ap, dev);
2907 }
2908
2909 /* reset and determine device classes */
2910 ap->ops->phy_reset(ap);
2911
2912 ata_for_each_dev(dev, &ap->link, ALL) {
2913 if (dev->class != ATA_DEV_UNKNOWN)
2914 classes[dev->devno] = dev->class;
2915 else
2916 classes[dev->devno] = ATA_DEV_NONE;
2917
2918 dev->class = ATA_DEV_UNKNOWN;
2919 }
2920
2921 /* read IDENTIFY page and configure devices. We have to do the
2922 identify-specific sequence bass-ackwards so that PDIAG- is released by
2923 the slave device */
2924
2925 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2926 if (tries[dev->devno])
2927 dev->class = classes[dev->devno];
2928
2929 if (!ata_dev_enabled(dev))
2930 continue;
2931
2932 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2933 dev->id);
2934 if (rc)
2935 goto fail;
2936 }
2937
2938 /* Now ask for the cable type as PDIAG- should have been released */
2939 if (ap->ops->cable_detect)
2940 ap->cbl = ap->ops->cable_detect(ap);
2941
2942 /* We may have SATA bridge glue hiding here irrespective of
2943 * the reported cable types and sensed types. When SATA
2944 * drives indicate we have a bridge, we don't know which end
2945 * of the link the bridge is on, which is a problem.
2946 */
2947 ata_for_each_dev(dev, &ap->link, ENABLED)
2948 if (ata_id_is_sata(dev->id))
2949 ap->cbl = ATA_CBL_SATA;
2950
2951 /* After the identify sequence we can now set up the devices. We do
2952 this in the normal order so that the user doesn't get confused */
2953
2954 ata_for_each_dev(dev, &ap->link, ENABLED) {
2955 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2956 rc = ata_dev_configure(dev);
2957 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2958 if (rc)
2959 goto fail;
2960 }
2961
2962 /* configure transfer mode */
2963 rc = ata_set_mode(&ap->link, &dev);
2964 if (rc)
2965 goto fail;
2966
2967 ata_for_each_dev(dev, &ap->link, ENABLED)
2968 return 0;
2969
2970 return -ENODEV;
2971
2972 fail:
2973 tries[dev->devno]--;
2974
2975 switch (rc) {
2976 case -EINVAL:
2977 /* eeek, something went very wrong, give up */
2978 tries[dev->devno] = 0;
2979 break;
2980
2981 case -ENODEV:
2982 /* give it just one more chance */
2983 tries[dev->devno] = min(tries[dev->devno], 1);
2984 /* fall through */
2985 case -EIO:
2986 if (tries[dev->devno] == 1) {
2987 /* This is the last chance, better to slow
2988 * down than lose it.
2989 */
2990 sata_down_spd_limit(&ap->link, 0);
2991 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2992 }
2993 }
2994
2995 if (!tries[dev->devno])
2996 ata_dev_disable(dev);
2997
2998 goto retry;
2999 }
3000
3001 /**
3002 * sata_print_link_status - Print SATA link status
3003 * @link: SATA link to printk link status about
3004 *
3005 * This function prints link speed and status of a SATA link.
3006 *
3007 * LOCKING:
3008 * None.
3009 */
3010 static void sata_print_link_status(struct ata_link *link)
3011 {
3012 u32 sstatus, scontrol, tmp;
3013
3014 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3015 return;
3016 sata_scr_read(link, SCR_CONTROL, &scontrol);
3017
3018 if (ata_phys_link_online(link)) {
3019 tmp = (sstatus >> 4) & 0xf;
3020 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3021 sata_spd_string(tmp), sstatus, scontrol);
3022 } else {
3023 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3024 sstatus, scontrol);
3025 }
3026 }
3027
3028 /**
3029 * ata_dev_pair - return other device on cable
3030 * @adev: device
3031 *
3032 * Obtain the other device on the same cable, or if none is
3033 * present NULL is returned
3034 */
3035
3036 struct ata_device *ata_dev_pair(struct ata_device *adev)
3037 {
3038 struct ata_link *link = adev->link;
3039 struct ata_device *pair = &link->device[1 - adev->devno];
3040 if (!ata_dev_enabled(pair))
3041 return NULL;
3042 return pair;
3043 }
3044 EXPORT_SYMBOL_GPL(ata_dev_pair);
3045
3046 /**
3047 * sata_down_spd_limit - adjust SATA spd limit downward
3048 * @link: Link to adjust SATA spd limit for
3049 * @spd_limit: Additional limit
3050 *
3051 * Adjust SATA spd limit of @link downward. Note that this
3052 * function only adjusts the limit. The change must be applied
3053 * using sata_set_spd().
3054 *
3055 * If @spd_limit is non-zero, the speed is limited to equal to or
3056 * lower than @spd_limit if such speed is supported. If
3057 * @spd_limit is slower than any supported speed, only the lowest
3058 * supported speed is allowed.
3059 *
3060 * LOCKING:
3061 * Inherited from caller.
3062 *
3063 * RETURNS:
3064 * 0 on success, negative errno on failure
3065 */
3066 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3067 {
3068 u32 sstatus, spd, mask;
3069 int rc, bit;
3070
3071 if (!sata_scr_valid(link))
3072 return -EOPNOTSUPP;
3073
3074 /* If SCR can be read, use it to determine the current SPD.
3075 * If not, use cached value in link->sata_spd.
3076 */
3077 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3078 if (rc == 0 && ata_sstatus_online(sstatus))
3079 spd = (sstatus >> 4) & 0xf;
3080 else
3081 spd = link->sata_spd;
3082
3083 mask = link->sata_spd_limit;
3084 if (mask <= 1)
3085 return -EINVAL;
3086
3087 /* unconditionally mask off the highest bit */
3088 bit = fls(mask) - 1;
3089 mask &= ~(1 << bit);
3090
3091 /*
3092 * Mask off all speeds higher than or equal to the current one. At
3093 * this point, if current SPD is not available and we previously
3094 * recorded the link speed from SStatus, the driver has already
3095 * masked off the highest bit so mask should already be 1 or 0.
3096 * Otherwise, we should not force 1.5Gbps on a link where we have
3097 * not previously recorded speed from SStatus. Just return in this
3098 * case.
3099 */
3100 if (spd > 1)
3101 mask &= (1 << (spd - 1)) - 1;
3102 else
3103 return -EINVAL;
3104
3105 /* were we already at the bottom? */
3106 if (!mask)
3107 return -EINVAL;
3108
3109 if (spd_limit) {
3110 if (mask & ((1 << spd_limit) - 1))
3111 mask &= (1 << spd_limit) - 1;
3112 else {
3113 bit = ffs(mask) - 1;
3114 mask = 1 << bit;
3115 }
3116 }
3117
3118 link->sata_spd_limit = mask;
3119
3120 ata_link_warn(link, "limiting SATA link speed to %s\n",
3121 sata_spd_string(fls(mask)));
3122
3123 return 0;
3124 }
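/*
 * Worked example (illustrative): with sata_spd_limit == 0x7 (up to
 * 6.0 Gbps) and the link running at spd == 2 (3.0 Gbps), the highest
 * bit is dropped first (mask == 0x3), then all speeds at or above the
 * current one are masked (mask &= (1 << 1) - 1 == 0x1), limiting the
 * link to 1.5 Gbps on the next hardreset.
 */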
3125
3126 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
3127 {
3128 struct ata_link *host_link = &link->ap->link;
3129 u32 limit, target, spd;
3130
3131 limit = link->sata_spd_limit;
3132
3133 /* Don't configure downstream link faster than upstream link.
3134 * It doesn't speed up anything and some PMPs choke on such
3135 * configuration.
3136 */
3137 if (!ata_is_host_link(link) && host_link->sata_spd)
3138 limit &= (1 << host_link->sata_spd) - 1;
3139
3140 if (limit == UINT_MAX)
3141 target = 0;
3142 else
3143 target = fls(limit);
3144
3145 spd = (*scontrol >> 4) & 0xf;
3146 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
3147
3148 return spd != target;
3149 }
3150
3151 /**
3152 * sata_set_spd_needed - is SATA spd configuration needed
3153 * @link: Link in question
3154 *
3155 * Test whether the spd limit in SControl matches
3156 * @link->sata_spd_limit. This function is used to determine
3157 * whether hardreset is necessary to apply SATA spd
3158 * configuration.
3159 *
3160 * LOCKING:
3161 * Inherited from caller.
3162 *
3163 * RETURNS:
3164 * 1 if SATA spd configuration is needed, 0 otherwise.
3165 */
3166 static int sata_set_spd_needed(struct ata_link *link)
3167 {
3168 u32 scontrol;
3169
3170 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3171 return 1;
3172
3173 return __sata_set_spd_needed(link, &scontrol);
3174 }
3175
3176 /**
3177 * sata_set_spd - set SATA spd according to spd limit
3178 * @link: Link to set SATA spd for
3179 *
3180 * Set SATA spd of @link according to sata_spd_limit.
3181 *
3182 * LOCKING:
3183 * Inherited from caller.
3184 *
3185 * RETURNS:
3186 * 0 if spd doesn't need to be changed, 1 if spd has been
3187 * changed. Negative errno if SCR registers are inaccessible.
3188 */
3189 int sata_set_spd(struct ata_link *link)
3190 {
3191 u32 scontrol;
3192 int rc;
3193
3194 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3195 return rc;
3196
3197 if (!__sata_set_spd_needed(link, &scontrol))
3198 return 0;
3199
3200 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3201 return rc;
3202
3203 return 1;
3204 }
3205 EXPORT_SYMBOL_GPL(sata_set_spd);
3206
3207 /*
3208 * This mode timing computation functionality is ported over from
3209 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3210 */
3211 /*
3212 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3213 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3214 * for UDMA6, which is currently supported only by Maxtor drives.
3215 *
3216 * For PIO 5/6 and MWDMA 3/4 see the CFA specification 3.0.
3217 */
3218
3219 static const struct ata_timing ata_timing[] = {
3220 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
3221 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
3222 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
3223 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
3224 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
3225 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
3226 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
3227 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
3228
3229 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
3230 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
3231 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
3232
3233 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
3234 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
3235 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
3236 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
3237 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
3238
3239 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
3240 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
3241 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
3242 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
3243 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
3244 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
3245 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
3246 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
3247
3248 { 0xFF }
3249 };
3250
3251 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
3252 #define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0)
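/*
 * ENOUGH() is a rounding-up division; EZ() converts a nanosecond
 * value into clock ticks for a clock period given in picoseconds,
 * mapping 0 to 0. Worked example (illustrative): on a 33 MHz bus,
 * T = 30000 ps, so EZ(70, 30000) = ENOUGH(70000, 30000) = 3 ticks
 * for a 70 ns setup time.
 */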
3253
3254 static void ata_timing_quantize(const struct ata_timing *t,
3255 struct ata_timing *q, int T, int UT)
3256 {
3257 q->setup = EZ(t->setup, T);
3258 q->act8b = EZ(t->act8b, T);
3259 q->rec8b = EZ(t->rec8b, T);
3260 q->cyc8b = EZ(t->cyc8b, T);
3261 q->active = EZ(t->active, T);
3262 q->recover = EZ(t->recover, T);
3263 q->dmack_hold = EZ(t->dmack_hold, T);
3264 q->cycle = EZ(t->cycle, T);
3265 q->udma = EZ(t->udma, UT);
3266 }
3267
3268 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3269 struct ata_timing *m, unsigned int what)
3270 {
3271 if (what & ATA_TIMING_SETUP)
3272 m->setup = max(a->setup, b->setup);
3273 if (what & ATA_TIMING_ACT8B)
3274 m->act8b = max(a->act8b, b->act8b);
3275 if (what & ATA_TIMING_REC8B)
3276 m->rec8b = max(a->rec8b, b->rec8b);
3277 if (what & ATA_TIMING_CYC8B)
3278 m->cyc8b = max(a->cyc8b, b->cyc8b);
3279 if (what & ATA_TIMING_ACTIVE)
3280 m->active = max(a->active, b->active);
3281 if (what & ATA_TIMING_RECOVER)
3282 m->recover = max(a->recover, b->recover);
3283 if (what & ATA_TIMING_DMACK_HOLD)
3284 m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3285 if (what & ATA_TIMING_CYCLE)
3286 m->cycle = max(a->cycle, b->cycle);
3287 if (what & ATA_TIMING_UDMA)
3288 m->udma = max(a->udma, b->udma);
3289 }
3290 EXPORT_SYMBOL_GPL(ata_timing_merge);
3291
3292 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3293 {
3294 const struct ata_timing *t = ata_timing;
3295
3296 while (xfer_mode > t->mode)
3297 t++;
3298
3299 if (xfer_mode == t->mode)
3300 return t;
3301
3302 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
3303 __func__, xfer_mode);
3304
3305 return NULL;
3306 }
3307 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
3308
3309 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3310 struct ata_timing *t, int T, int UT)
3311 {
3312 const u16 *id = adev->id;
3313 const struct ata_timing *s;
3314 struct ata_timing p;
3315
3316 /*
3317 * Find the mode.
3318 */
3319 s = ata_timing_find_mode(speed);
3320 if (!s)
3321 return -EINVAL;
3322
3323 memcpy(t, s, sizeof(*s));
3324
3325 /*
3326 * If the drive is an EIDE drive, it can tell us it needs extended
3327 * PIO/MW_DMA cycle timing.
3328 */
3329
3330 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3331 memset(&p, 0, sizeof(p));
3332
3333 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
3334 if (speed <= XFER_PIO_2)
3335 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3336 else if ((speed <= XFER_PIO_4) ||
3337 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3338 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3339 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3340 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3341
3342 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3343 }
3344
3345 /*
3346 * Convert the timing to bus clock counts.
3347 */
3348
3349 ata_timing_quantize(t, t, T, UT);
3350
3351 /*
3352 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3353 * S.M.A.R.T. and some other commands. We have to ensure that the
3354 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3355 */
3356
3357 if (speed > XFER_PIO_6) {
3358 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3359 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3360 }
3361
3362 /*
3363 * Lengthen active & recovery time so that cycle time is correct.
3364 */
3365
3366 if (t->act8b + t->rec8b < t->cyc8b) {
3367 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3368 t->rec8b = t->cyc8b - t->act8b;
3369 }
3370
3371 if (t->active + t->recover < t->cycle) {
3372 t->active += (t->cycle - (t->active + t->recover)) / 2;
3373 t->recover = t->cycle - t->active;
3374 }
3375
3376 /*
3377 * In a few cases quantisation may produce enough errors to
3378 * leave t->cycle too low for the sum of active and recovery;
3379 * if so, we must correct this.
3380 */
3381 if (t->active + t->recover > t->cycle)
3382 t->cycle = t->active + t->recover;
3383
3384 return 0;
3385 }
3386 EXPORT_SYMBOL_GPL(ata_timing_compute);
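/*
 * Illustrative sketch, not part of this file: a typical PATA driver
 * calls ata_timing_compute() from its ->set_piomode() hook and then
 * programs the result into its chipset registers. The clock value and
 * example_write_timings() below are made up for illustration.
 *
 *	static void example_set_piomode(struct ata_port *ap,
 *					struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *		int T = 1000000000 / 33333;	(about 30000 ps at 33 MHz)
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0))
 *			return;
 *		example_write_timings(ap, adev, t.active, t.recover);
 *	}
 */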
3387
3388 /**
3389 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3390 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3391 * @cycle: cycle duration in ns
3392 *
3393 * Return matching xfer mode for @cycle. The returned mode is of
3394 * the transfer type specified by @xfer_shift. If @cycle is too
3395 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3396 * than the fastest known mode, the fastest mode is returned.
3397 *
3398 * LOCKING:
3399 * None.
3400 *
3401 * RETURNS:
3402 * Matching xfer_mode, 0xff if no match found.
3403 */
3404 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3405 {
3406 u8 base_mode = 0xff, last_mode = 0xff;
3407 const struct ata_xfer_ent *ent;
3408 const struct ata_timing *t;
3409
3410 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3411 if (ent->shift == xfer_shift)
3412 base_mode = ent->base;
3413
3414 for (t = ata_timing_find_mode(base_mode);
3415 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3416 unsigned short this_cycle;
3417
3418 switch (xfer_shift) {
3419 case ATA_SHIFT_PIO:
3420 case ATA_SHIFT_MWDMA:
3421 this_cycle = t->cycle;
3422 break;
3423 case ATA_SHIFT_UDMA:
3424 this_cycle = t->udma;
3425 break;
3426 default:
3427 return 0xff;
3428 }
3429
3430 if (cycle > this_cycle)
3431 break;
3432
3433 last_mode = t->mode;
3434 }
3435
3436 return last_mode;
3437 }
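/*
 * Worked example (illustrative): ata_timing_cycle2mode(ATA_SHIFT_UDMA,
 * 120) walks the UDMA entries in ata_timing[] and returns XFER_UDMA_0
 * (udma cycle 120 ns); asking for a 10 ns cycle, faster than any known
 * mode, returns XFER_UDMA_6.
 */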
3438
3439 /**
3440 * ata_down_xfermask_limit - adjust dev xfer masks downward
3441 * @dev: Device to adjust xfer masks
3442 * @sel: ATA_DNXFER_* selector
3443 *
3444 * Adjust xfer masks of @dev downward. Note that this function
3445 * does not apply the change. Invoking ata_set_mode() afterwards
3446 * will apply the limit.
3447 *
3448 * LOCKING:
3449 * Inherited from caller.
3450 *
3451 * RETURNS:
3452 * 0 on success, negative errno on failure
3453 */
3454 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3455 {
3456 char buf[32];
3457 unsigned long orig_mask, xfer_mask;
3458 unsigned long pio_mask, mwdma_mask, udma_mask;
3459 int quiet, highbit;
3460
3461 quiet = !!(sel & ATA_DNXFER_QUIET);
3462 sel &= ~ATA_DNXFER_QUIET;
3463
3464 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3465 dev->mwdma_mask,
3466 dev->udma_mask);
3467 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3468
3469 switch (sel) {
3470 case ATA_DNXFER_PIO:
3471 highbit = fls(pio_mask) - 1;
3472 pio_mask &= ~(1 << highbit);
3473 break;
3474
3475 case ATA_DNXFER_DMA:
3476 if (udma_mask) {
3477 highbit = fls(udma_mask) - 1;
3478 udma_mask &= ~(1 << highbit);
3479 if (!udma_mask)
3480 return -ENOENT;
3481 } else if (mwdma_mask) {
3482 highbit = fls(mwdma_mask) - 1;
3483 mwdma_mask &= ~(1 << highbit);
3484 if (!mwdma_mask)
3485 return -ENOENT;
3486 }
3487 break;
3488
3489 case ATA_DNXFER_40C:
3490 udma_mask &= ATA_UDMA_MASK_40C;
3491 break;
3492
3493 case ATA_DNXFER_FORCE_PIO0:
3494 pio_mask &= 1;
3495 /* fall through */
3496 case ATA_DNXFER_FORCE_PIO:
3497 mwdma_mask = 0;
3498 udma_mask = 0;
3499 break;
3500
3501 default:
3502 BUG();
3503 }
3504
3505 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3506
3507 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3508 return -ENOENT;
3509
3510 if (!quiet) {
3511 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3512 snprintf(buf, sizeof(buf), "%s:%s",
3513 ata_mode_string(xfer_mask),
3514 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3515 else
3516 snprintf(buf, sizeof(buf), "%s",
3517 ata_mode_string(xfer_mask));
3518
3519 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3520 }
3521
3522 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3523 &dev->udma_mask);
3524
3525 return 0;
3526 }
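/*
 * Worked example (illustrative): for a device with pio_mask == 0x1f
 * (PIO0-4), ata_down_xfermask_limit(dev, ATA_DNXFER_PIO) clears the
 * PIO4 bit, leaving 0x0f; a later ata_set_mode() then reprograms the
 * device at PIO3.
 */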
3527
3528 static int ata_dev_set_mode(struct ata_device *dev)
3529 {
3530 struct ata_port *ap = dev->link->ap;
3531 struct ata_eh_context *ehc = &dev->link->eh_context;
3532 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3533 const char *dev_err_whine = "";
3534 int ign_dev_err = 0;
3535 unsigned int err_mask = 0;
3536 int rc;
3537
3538 dev->flags &= ~ATA_DFLAG_PIO;
3539 if (dev->xfer_shift == ATA_SHIFT_PIO)
3540 dev->flags |= ATA_DFLAG_PIO;
3541
3542 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3543 dev_err_whine = " (SET_XFERMODE skipped)";
3544 else {
3545 if (nosetxfer)
3546 ata_dev_warn(dev,
3547 "NOSETXFER but PATA detected - can't "
3548 "skip SETXFER, might malfunction\n");
3549 err_mask = ata_dev_set_xfermode(dev);
3550 }
3551
3552 if (err_mask & ~AC_ERR_DEV)
3553 goto fail;
3554
3555 /* revalidate */
3556 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3557 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3558 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3559 if (rc)
3560 return rc;
3561
3562 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3563 /* Old CFA may refuse this command, which is just fine */
3564 if (ata_id_is_cfa(dev->id))
3565 ign_dev_err = 1;
3566 /* Catch several broken garbage emulations plus some
3567 pre-ATA devices */
3568 if (ata_id_major_version(dev->id) == 0 &&
3569 dev->pio_mode <= XFER_PIO_2)
3570 ign_dev_err = 1;
3571 /* Some very old devices and some bad newer ones fail
3572 any kind of SET_XFERMODE request but support PIO0-2
3573 timings and no IORDY */
3574 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3575 ign_dev_err = 1;
3576 }
3577 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3578 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3579 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3580 dev->dma_mode == XFER_MW_DMA_0 &&
3581 (dev->id[63] >> 8) & 1)
3582 ign_dev_err = 1;
3583
3584 /* if the device is actually configured correctly, ignore dev err */
3585 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3586 ign_dev_err = 1;
3587
3588 if (err_mask & AC_ERR_DEV) {
3589 if (!ign_dev_err)
3590 goto fail;
3591 else
3592 dev_err_whine = " (device error ignored)";
3593 }
3594
3595 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3596 dev->xfer_shift, (int)dev->xfer_mode);
3597
3598 if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3599 ehc->i.flags & ATA_EHI_DID_HARDRESET)
3600 ata_dev_info(dev, "configured for %s%s\n",
3601 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3602 dev_err_whine);
3603
3604 return 0;
3605
3606 fail:
3607 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3608 return -EIO;
3609 }
3610
3611 /**
3612 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3613 * @link: link on which timings will be programmed
3614 * @r_failed_dev: out parameter for failed device
3615 *
3616 * Standard implementation of the function used to tune and set
3617 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3618 * ata_dev_set_mode() fails, pointer to the failing device is
3619 * returned in @r_failed_dev.
3620 *
3621 * LOCKING:
3622 * PCI/etc. bus probe sem.
3623 *
3624 * RETURNS:
3625 * 0 on success, negative errno otherwise
3626 */
3627
3628 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3629 {
3630 struct ata_port *ap = link->ap;
3631 struct ata_device *dev;
3632 int rc = 0, used_dma = 0, found = 0;
3633
3634 /* step 1: calculate xfer_mask */
3635 ata_for_each_dev(dev, link, ENABLED) {
3636 unsigned long pio_mask, dma_mask;
3637 unsigned int mode_mask;
3638
3639 mode_mask = ATA_DMA_MASK_ATA;
3640 if (dev->class == ATA_DEV_ATAPI)
3641 mode_mask = ATA_DMA_MASK_ATAPI;
3642 else if (ata_id_is_cfa(dev->id))
3643 mode_mask = ATA_DMA_MASK_CFA;
3644
3645 ata_dev_xfermask(dev);
3646 ata_force_xfermask(dev);
3647
3648 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3649
3650 if (libata_dma_mask & mode_mask)
3651 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3652 dev->udma_mask);
3653 else
3654 dma_mask = 0;
3655
3656 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3657 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3658
3659 found = 1;
3660 if (ata_dma_enabled(dev))
3661 used_dma = 1;
3662 }
3663 if (!found)
3664 goto out;
3665
3666 /* step 2: always set host PIO timings */
3667 ata_for_each_dev(dev, link, ENABLED) {
3668 if (dev->pio_mode == 0xff) {
3669 ata_dev_warn(dev, "no PIO support\n");
3670 rc = -EINVAL;
3671 goto out;
3672 }
3673
3674 dev->xfer_mode = dev->pio_mode;
3675 dev->xfer_shift = ATA_SHIFT_PIO;
3676 if (ap->ops->set_piomode)
3677 ap->ops->set_piomode(ap, dev);
3678 }
3679
3680 /* step 3: set host DMA timings */
3681 ata_for_each_dev(dev, link, ENABLED) {
3682 if (!ata_dma_enabled(dev))
3683 continue;
3684
3685 dev->xfer_mode = dev->dma_mode;
3686 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3687 if (ap->ops->set_dmamode)
3688 ap->ops->set_dmamode(ap, dev);
3689 }
3690
3691 /* step 4: update devices' xfer mode */
3692 ata_for_each_dev(dev, link, ENABLED) {
3693 rc = ata_dev_set_mode(dev);
3694 if (rc)
3695 goto out;
3696 }
3697
3698 /* Record simplex status. If we selected DMA then the other
3699 * host channels are not permitted to do so.
3700 */
3701 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3702 ap->host->simplex_claimed = ap;
3703
3704 out:
3705 if (rc)
3706 *r_failed_dev = dev;
3707 return rc;
3708 }
3709 EXPORT_SYMBOL_GPL(ata_do_set_mode);
3710
3711 /**
3712 * ata_wait_ready - wait for link to become ready
3713 * @link: link to be waited on
3714 * @deadline: deadline jiffies for the operation
3715 * @check_ready: callback to check link readiness
3716 *
3717 * Wait for @link to become ready. @check_ready should return
3718 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3719 * link doesn't seem to be occupied, other errno for other error
3720 * conditions.
3721 *
3722 * Transient -ENODEV conditions are allowed for
3723 * ATA_TMOUT_FF_WAIT.
3724 *
3725 * LOCKING:
3726 * EH context.
3727 *
3728 * RETURNS:
3729 * 0 if @link is ready before @deadline; otherwise, -errno.
3730 */
3731 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3732 int (*check_ready)(struct ata_link *link))
3733 {
3734 unsigned long start = jiffies;
3735 unsigned long nodev_deadline;
3736 int warned = 0;
3737
3738 /* choose which 0xff timeout to use, read comment in libata.h */
3739 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3740 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3741 else
3742 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3743
3744 /* Slave readiness can't be tested separately from master. On
3745 * M/S emulation configuration, this function should be called
3746 * only on the master and it will handle both master and slave.
3747 */
3748 WARN_ON(link == link->ap->slave_link);
3749
3750 if (time_after(nodev_deadline, deadline))
3751 nodev_deadline = deadline;
3752
3753 while (1) {
3754 unsigned long now = jiffies;
3755 int ready, tmp;
3756
3757 ready = tmp = check_ready(link);
3758 if (ready > 0)
3759 return 0;
3760
3761 /*
3762 * -ENODEV could be transient. Ignore -ENODEV if link
3763 * is online. Also, some SATA devices take a long
3764 * time to clear 0xff after reset. Wait for
3765 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3766 * offline.
3767 *
3768 * Note that some PATA controllers (pata_ali) explode
3769 * if status register is read more than once when
3770 * there's no device attached.
3771 */
3772 if (ready == -ENODEV) {
3773 if (ata_link_online(link))
3774 ready = 0;
3775 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3776 !ata_link_offline(link) &&
3777 time_before(now, nodev_deadline))
3778 ready = 0;
3779 }
3780
3781 if (ready)
3782 return ready;
3783 if (time_after(now, deadline))
3784 return -EBUSY;
3785
3786 if (!warned && time_after(now, start + 5 * HZ) &&
3787 (deadline - now > 3 * HZ)) {
3788 ata_link_warn(link,
3789 "link is slow to respond, please be patient "
3790 "(ready=%d)\n", tmp);
3791 warned = 1;
3792 }
3793
3794 ata_msleep(link->ap, 50);
3795 }
3796 }
3797
3798 /**
3799 * ata_wait_after_reset - wait for link to become ready after reset
3800 * @link: link to be waited on
3801 * @deadline: deadline jiffies for the operation
3802 * @check_ready: callback to check link readiness
3803 *
3804 * Wait for @link to become ready after reset.
3805 *
3806 * LOCKING:
3807 * EH context.
3808 *
3809 * RETURNS:
3810 * 0 if @link is ready before @deadline; otherwise, -errno.
3811 */
3812 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3813 int (*check_ready)(struct ata_link *link))
3814 {
3815 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3816
3817 return ata_wait_ready(link, deadline, check_ready);
3818 }
3819 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
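/*
 * Illustrative sketch, not part of this file: a minimal @check_ready
 * callback for the two helpers above. example_read_status() is made
 * up; SFF drivers normally feed the taskfile status register to
 * ata_check_ready(), which returns 1 (ready), 0 (busy) or -ENODEV
 * (0xff status, nothing attached).
 *
 *	static int example_check_ready(struct ata_link *link)
 *	{
 *		u8 status = example_read_status(link->ap);
 *
 *		return ata_check_ready(status);
 *	}
 */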
3820
3821 /**
3822 * sata_link_debounce - debounce SATA phy status
3823 * @link: ATA link to debounce SATA phy status for
3824 * @params: timing parameters { interval, duration, timeout } in msec
3825 * @deadline: deadline jiffies for the operation
3826 *
3827 * Make sure SStatus of @link reaches stable state, determined by
3828 * holding the same value where DET is not 1 for @duration polled
3829 * every @interval, before @timeout. @timeout constrains the
3830 * beginning of the stable state. Because DET gets stuck at 1 on
3831 * some controllers after hot unplugging, this function waits
3832 * until timeout and then returns 0 if DET is stable at 1.
3833 *
3834 * @timeout is further limited by @deadline. The sooner of the
3835 * two is used.
3836 *
3837 * LOCKING:
3838 * Kernel thread context (may sleep)
3839 *
3840 * RETURNS:
3841 * 0 on success, -errno on failure.
3842 */
3843 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3844 unsigned long deadline)
3845 {
3846 unsigned long interval = params[0];
3847 unsigned long duration = params[1];
3848 unsigned long last_jiffies, t;
3849 u32 last, cur;
3850 int rc;
3851
3852 t = ata_deadline(jiffies, params[2]);
3853 if (time_before(t, deadline))
3854 deadline = t;
3855
3856 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3857 return rc;
3858 cur &= 0xf;
3859
3860 last = cur;
3861 last_jiffies = jiffies;
3862
3863 while (1) {
3864 ata_msleep(link->ap, interval);
3865 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3866 return rc;
3867 cur &= 0xf;
3868
3869 /* DET stable? */
3870 if (cur == last) {
3871 if (cur == 1 && time_before(jiffies, deadline))
3872 continue;
3873 if (time_after(jiffies,
3874 ata_deadline(last_jiffies, duration)))
3875 return 0;
3876 continue;
3877 }
3878
3879 /* unstable, start over */
3880 last = cur;
3881 last_jiffies = jiffies;
3882
3883 /* Check deadline. If debouncing failed, return
3884 * -EPIPE to tell upper layer to lower link speed.
3885 */
3886 if (time_after(jiffies, deadline))
3887 return -EPIPE;
3888 }
3889 }
3890 EXPORT_SYMBOL_GPL(sata_link_debounce);
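/*
 * Worked example (illustrative, made-up numbers): with params
 * { 10, 200, 1000 }, SStatus is sampled every 10 ms and DET must hold
 * the same value (other than 1) for 200 ms; the stable window must
 * begin within 1000 ms (further capped by @deadline), otherwise -EPIPE
 * is returned.
 */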
3891
3892 /**
3893 * sata_link_resume - resume SATA link
3894 * @link: ATA link to resume SATA
3895 * @params: timing parameters { interval, duration, timeout } in msec
3896 * @deadline: deadline jiffies for the operation
3897 *
3898 * Resume SATA phy @link and debounce it.
3899 *
3900 * LOCKING:
3901 * Kernel thread context (may sleep)
3902 *
3903 * RETURNS:
3904 * 0 on success, -errno on failure.
3905 */
3906 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3907 unsigned long deadline)
3908 {
3909 int tries = ATA_LINK_RESUME_TRIES;
3910 u32 scontrol, serror;
3911 int rc;
3912
3913 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3914 return rc;
3915
3916 /*
3917 * Writes to SControl sometimes get ignored under certain
3918 * controllers (ata_piix SIDPR). Make sure DET actually is
3919 * cleared.
3920 */
3921 do {
3922 scontrol = (scontrol & 0x0f0) | 0x300;
3923 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3924 return rc;
3925 /*
3926 * Some PHYs react badly if SStatus is pounded
3927 * immediately after resuming. Delay 200ms before
3928 * debouncing.
3929 */
3930 if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
3931 ata_msleep(link->ap, 200);
3932
3933 /* is SControl restored correctly? */
3934 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3935 return rc;
3936 } while ((scontrol & 0xf0f) != 0x300 && --tries);
3937
3938 if ((scontrol & 0xf0f) != 0x300) {
3939 ata_link_warn(link, "failed to resume link (SControl %X)\n",
3940 scontrol);
3941 return 0;
3942 }
3943
3944 if (tries < ATA_LINK_RESUME_TRIES)
3945 ata_link_warn(link, "link resume succeeded after %d retries\n",
3946 ATA_LINK_RESUME_TRIES - tries);
3947
3948 if ((rc = sata_link_debounce(link, params, deadline)))
3949 return rc;
3950
3951 /* clear SError, some PHYs require this even for SRST to work */
3952 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3953 rc = sata_scr_write(link, SCR_ERROR, serror);
3954
3955 return rc != -EINVAL ? rc : 0;
3956 }
3957 EXPORT_SYMBOL_GPL(sata_link_resume);
3958
3959 /**
3960 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
3961 * @link: ATA link to manipulate SControl for
3962 * @policy: LPM policy to configure
3963 * @spm_wakeup: initiate LPM transition to active state
3964 *
3965 * Manipulate the IPM field of the SControl register of @link
3966 * according to @policy. If @policy is ATA_LPM_MAX_POWER and
3967 * @spm_wakeup is %true, the SPM field is manipulated to wake up
3968 * the link. This function also clears PHYRDY_CHG before
3969 * returning.
3970 *
3971 * LOCKING:
3972 * EH context.
3973 *
3974 * RETURNS:
3975 * 0 on success, -errno otherwise.
3976 */
3977 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3978 bool spm_wakeup)
3979 {
3980 struct ata_eh_context *ehc = &link->eh_context;
3981 bool woken_up = false;
3982 u32 scontrol;
3983 int rc;
3984
3985 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
3986 if (rc)
3987 return rc;
3988
3989 switch (policy) {
3990 case ATA_LPM_MAX_POWER:
3991 /* disable all LPM transitions */
3992 scontrol |= (0x7 << 8);
3993 /* initiate transition to active state */
3994 if (spm_wakeup) {
3995 scontrol |= (0x4 << 12);
3996 woken_up = true;
3997 }
3998 break;
3999 case ATA_LPM_MED_POWER:
4000 /* allow LPM to PARTIAL */
4001 scontrol &= ~(0x1 << 8);
4002 scontrol |= (0x6 << 8);
4003 break;
4004 case ATA_LPM_MED_POWER_WITH_DIPM:
4005 case ATA_LPM_MIN_POWER_WITH_PARTIAL:
4006 case ATA_LPM_MIN_POWER:
4007 if (ata_link_nr_enabled(link) > 0) {
4008 /* no restrictions on LPM transitions */
4009 scontrol &= ~(0x7 << 8);
4010 } else {
4011 /* empty port, power off */
4012 scontrol &= ~0xf;
4013 scontrol |= (0x1 << 2);
4014 }
4015 break;
4016 default:
4017 WARN_ON(1);
4018 }
4019
4020 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
4021 if (rc)
4022 return rc;
4023
4024 /* give the link time to transit out of LPM state */
4025 if (woken_up)
4026 msleep(10);
4027
4028 /* clear PHYRDY_CHG from SError */
4029 ehc->i.serror &= ~SERR_PHYRDY_CHG;
4030 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
4031 }
4032 EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
4033
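/*
 * A hedged usage sketch: force a link back to full power before
 * issuing commands. ATA_LPM_MAX_POWER with @spm_wakeup disables
 * further LPM transitions (IPM bits 11:8) and requests a transition
 * to active via the SPM field (bits 15:12), as done above.
 */
static int example_wake_link(struct ata_link *link)
{
	/* EH context only, per the LOCKING rules above */
	return sata_link_scr_lpm(link, ATA_LPM_MAX_POWER, true);
}
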
4034 /**
4035 * ata_std_prereset - prepare for reset
4036 * @link: ATA link to be reset
4037 * @deadline: deadline jiffies for the operation
4038 *
4039 * @link is about to be reset. Initialize it. Failure from
4040 * prereset makes libata abort the whole reset sequence and give
4041 * up on that port, so prereset should be best-effort. It does
4042 * its best to prepare for the reset sequence, but if things go
4043 * wrong, it should just whine, not fail.
4044 *
4045 * LOCKING:
4046 * Kernel thread context (may sleep)
4047 *
4048 * RETURNS:
4049 * 0 on success, -errno otherwise.
4050 */
4051 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
4052 {
4053 struct ata_port *ap = link->ap;
4054 struct ata_eh_context *ehc = &link->eh_context;
4055 const unsigned long *timing = sata_ehc_deb_timing(ehc);
4056 int rc;
4057
4058 /* if we're about to do hardreset, nothing more to do */
4059 if (ehc->i.action & ATA_EH_HARDRESET)
4060 return 0;
4061
4062 /* if SATA, resume link */
4063 if (ap->flags & ATA_FLAG_SATA) {
4064 rc = sata_link_resume(link, timing, deadline);
4065 /* whine about phy resume failure but proceed */
4066 if (rc && rc != -EOPNOTSUPP)
4067 ata_link_warn(link,
4068 "failed to resume link for reset (errno=%d)\n",
4069 rc);
4070 }
4071
4072 /* no point in trying softreset on offline link */
4073 if (ata_phys_link_offline(link))
4074 ehc->i.action &= ~ATA_EH_SOFTRESET;
4075
4076 return 0;
4077 }
4078 EXPORT_SYMBOL_GPL(ata_std_prereset);
4079
4080 /**
4081 * sata_link_hardreset - reset link via SATA phy reset
4082 * @link: link to reset
4083 * @timing: timing parameters { interval, duration, timeout } in msec
4084 * @deadline: deadline jiffies for the operation
4085 * @online: optional out parameter indicating link onlineness
4086 * @check_ready: optional callback to check link readiness
4087 *
4088 * SATA phy-reset @link using DET bits of SControl register.
4089 * After hardreset, link readiness is waited upon using
4090 * ata_wait_ready() if @check_ready is specified. LLDs may
4091 * omit @check_ready and do the waiting themselves after this
4092 * function returns. Device classification is the LLD's
4093 * responsibility.
4094 *
4095 * *@online is set to %true iff reset succeeded and @link is online
4096 * after reset.
4097 *
4098 * LOCKING:
4099 * Kernel thread context (may sleep)
4100 *
4101 * RETURNS:
4102 * 0 on success, -errno otherwise.
4103 */
4104 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
4105 unsigned long deadline,
4106 bool *online, int (*check_ready)(struct ata_link *))
4107 {
4108 u32 scontrol;
4109 int rc;
4110
4111 DPRINTK("ENTER\n");
4112
4113 if (online)
4114 *online = false;
4115
4116 if (sata_set_spd_needed(link)) {
4117 /* SATA spec says nothing about how to reconfigure
4118 * spd. To be on the safe side, turn off phy during
4119 * reconfiguration. This works for at least ICH7 AHCI
4120 * and Sil3124.
4121 */
4122 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4123 goto out;
4124
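		/* preserve SPD; IPM=3 (no LPM) and DET=4 (disable the phy) */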
4125 scontrol = (scontrol & 0x0f0) | 0x304;
4126
4127 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
4128 goto out;
4129
4130 sata_set_spd(link);
4131 }
4132
4133 /* issue phy wake/reset */
4134 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4135 goto out;
4136
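	/* preserve SPD; IPM=3 (no LPM) and DET=1 (issue COMRESET) */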
4137 scontrol = (scontrol & 0x0f0) | 0x301;
4138
4139 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
4140 goto out;
4141
4142 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
4143 * 10.4.2 says at least 1 ms.
4144 */
4145 ata_msleep(link->ap, 1);
4146
4147 /* bring link back */
4148 rc = sata_link_resume(link, timing, deadline);
4149 if (rc)
4150 goto out;
4151 /* if link is offline nothing more to do */
4152 if (ata_phys_link_offline(link))
4153 goto out;
4154
4155 /* Link is online. From this point, -ENODEV too is an error. */
4156 if (online)
4157 *online = true;
4158
4159 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
4160 /* If PMP is supported, we have to do follow-up SRST.
4161 * Some PMPs don't send D2H Reg FIS after hardreset if
4162 * the first port is empty. Wait only for
4163 * ATA_TMOUT_PMP_SRST_WAIT.
4164 */
4165 if (check_ready) {
4166 unsigned long pmp_deadline;
4167
4168 pmp_deadline = ata_deadline(jiffies,
4169 ATA_TMOUT_PMP_SRST_WAIT);
4170 if (time_after(pmp_deadline, deadline))
4171 pmp_deadline = deadline;
4172 ata_wait_ready(link, pmp_deadline, check_ready);
4173 }
4174 rc = -EAGAIN;
4175 goto out;
4176 }
4177
4178 rc = 0;
4179 if (check_ready)
4180 rc = ata_wait_ready(link, deadline, check_ready);
4181 out:
4182 if (rc && rc != -EAGAIN) {
4183 /* online is set iff link is online && reset succeeded */
4184 if (online)
4185 *online = false;
4186 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
4187 }
4188 DPRINTK("EXIT, rc=%d\n", rc);
4189 return rc;
4190 }
4191 EXPORT_SYMBOL_GPL(sata_link_hardreset);
4192
4193 /**
4194 * sata_std_hardreset - COMRESET w/o waiting or classification
4195 * @link: link to reset
4196 * @class: resulting class of attached device
4197 * @deadline: deadline jiffies for the operation
4198 *
4199 * Standard SATA COMRESET w/o waiting or classification.
4200 *
4201 * LOCKING:
4202 * Kernel thread context (may sleep)
4203 *
4204 * RETURNS:
4205 * 0 if link offline, -EAGAIN if link online, -errno on errors.
4206 */
4207 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4208 unsigned long deadline)
4209 {
4210 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4211 bool online;
4212 int rc;
4213
4214 /* do hardreset */
4215 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
4216 return online ? -EAGAIN : rc;
4217 }
4218 EXPORT_SYMBOL_GPL(sata_std_hardreset);
4219
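/*
 * A hypothetical LLD hardreset sketch: same shape as
 * sata_std_hardreset() above, but waiting for link readiness through
 * a driver-supplied callback. Both example_* names are illustrative,
 * not libata APIs.
 */
static int example_check_ready(struct ata_link *link)
{
	/*
	 * A real LLD would read its controller's taskfile status here
	 * and decode it, e.g. with ata_check_ready(). Hard-coding
	 * "ready" keeps the sketch self-contained.
	 */
	return 1;
}

static int example_lld_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;

	/* with a check_ready callback, readiness is waited for here;
	 * classifying the device in *class remains the LLD's job */
	return sata_link_hardreset(link, timing, deadline, &online,
				   example_check_ready);
}
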
4220 /**
4221 * ata_std_postreset - standard postreset callback
4222 * @link: the target ata_link
4223 * @classes: classes of attached devices
4224 *
4225 * This function is invoked after a successful reset. Note that
4226 * the device might have been reset more than once using
4227 * different reset methods before postreset is invoked.
4228 *
4229 * LOCKING:
4230 * Kernel thread context (may sleep)
4231 */
4232 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4233 {
4234 u32 serror;
4235
4236 DPRINTK("ENTER\n");
4237
4238 /* reset complete, clear SError */
4239 if (!sata_scr_read(link, SCR_ERROR, &serror))
4240 sata_scr_write(link, SCR_ERROR, serror);
4241
4242 /* print link status */
4243 sata_print_link_status(link);
4244
4245 DPRINTK("EXIT\n");
4246 }
4247 EXPORT_SYMBOL_GPL(ata_std_postreset);
4248
4249 /**
4250 * ata_dev_same_device - Determine whether new ID matches configured device
4251 * @dev: device to compare against
4252 * @new_class: class of the new device
4253 * @new_id: IDENTIFY page of the new device
4254 *
4255 * Compare @new_class and @new_id against @dev and determine
4256 * whether @dev is the device indicated by @new_class and
4257 * @new_id.
4258 *
4259 * LOCKING:
4260 * None.
4261 *
4262 * RETURNS:
4263 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4264 */
4265 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4266 const u16 *new_id)
4267 {
4268 const u16 *old_id = dev->id;
4269 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4270 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4271
4272 if (dev->class != new_class) {
4273 ata_dev_info(dev, "class mismatch %d != %d\n",
4274 dev->class, new_class);
4275 return 0;
4276 }
4277
4278 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4279 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4280 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4281 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4282
4283 if (strcmp(model[0], model[1])) {
4284 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
4285 model[0], model[1]);
4286 return 0;
4287 }
4288
4289 if (strcmp(serial[0], serial[1])) {
4290 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
4291 serial[0], serial[1]);
4292 return 0;
4293 }
4294
4295 return 1;
4296 }
4297
4298 /**
4299 * ata_dev_reread_id - Re-read IDENTIFY data
4300 * @dev: target ATA device
4301 * @readid_flags: read ID flags
4302 *
4303 * Re-read IDENTIFY page and make sure @dev is still attached to
4304 * the port.
4305 *
4306 * LOCKING:
4307 * Kernel thread context (may sleep)
4308 *
4309 * RETURNS:
4310 * 0 on success, negative errno otherwise
4311 */
4312 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4313 {
4314 unsigned int class = dev->class;
4315 u16 *id = (void *)dev->link->ap->sector_buf;
4316 int rc;
4317
4318 /* read ID data */
4319 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4320 if (rc)
4321 return rc;
4322
4323 /* is the device still there? */
4324 if (!ata_dev_same_device(dev, class, id))
4325 return -ENODEV;
4326
4327 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4328 return 0;
4329 }
4330
4331 /**
4332 * ata_dev_revalidate - Revalidate ATA device
4333 * @dev: device to revalidate
4334 * @new_class: new class code
4335 * @readid_flags: read ID flags
4336 *
4337 * Re-read IDENTIFY page, make sure @dev is still attached to the
4338 * port and reconfigure it according to the new IDENTIFY page.
4339 *
4340 * LOCKING:
4341 * Kernel thread context (may sleep)
4342 *
4343 * RETURNS:
4344 * 0 on success, negative errno otherwise
4345 */
4346 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4347 unsigned int readid_flags)
4348 {
4349 u64 n_sectors = dev->n_sectors;
4350 u64 n_native_sectors = dev->n_native_sectors;
4351 int rc;
4352
4353 if (!ata_dev_enabled(dev))
4354 return -ENODEV;
4355
4356 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4357 if (ata_class_enabled(new_class) &&
4358 new_class != ATA_DEV_ATA &&
4359 new_class != ATA_DEV_ATAPI &&
4360 new_class != ATA_DEV_ZAC &&
4361 new_class != ATA_DEV_SEMB) {
4362 ata_dev_info(dev, "class mismatch %u != %u\n",
4363 dev->class, new_class);
4364 rc = -ENODEV;
4365 goto fail;
4366 }
4367
4368 /* re-read ID */
4369 rc = ata_dev_reread_id(dev, readid_flags);
4370 if (rc)
4371 goto fail;
4372
4373 /* configure device according to the new ID */
4374 rc = ata_dev_configure(dev);
4375 if (rc)
4376 goto fail;
4377
4378 /* verify n_sectors hasn't changed */
4379 if (dev->class != ATA_DEV_ATA || !n_sectors ||
4380 dev->n_sectors == n_sectors)
4381 return 0;
4382
4383 /* n_sectors has changed */
4384 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
4385 (unsigned long long)n_sectors,
4386 (unsigned long long)dev->n_sectors);
4387
4388 /*
4389 * Something could have caused HPA to be unlocked
4390 * involuntarily. If n_native_sectors hasn't changed and the
4391 * new size matches it, keep the device.
4392 */
4393 if (dev->n_native_sectors == n_native_sectors &&
4394 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4395 ata_dev_warn(dev,
4396 "new n_sectors matches native, probably "
4397 "late HPA unlock, n_sectors updated\n");
4398 /* use the larger n_sectors */
4399 return 0;
4400 }
4401
4402 /*
4403 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
4404 * unlocking HPA in those cases.
4405 *
4406 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4407 */
4408 if (dev->n_native_sectors == n_native_sectors &&
4409 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4410 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4411 ata_dev_warn(dev,
4412 "old n_sectors matches native, probably "
4413 "late HPA lock, will try to unlock HPA\n");
4414 /* try unlocking HPA */
4415 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4416 rc = -EIO;
4417 } else
4418 rc = -ENODEV;
4419
4420 /* restore original n_[native_]sectors and fail */
4421 dev->n_native_sectors = n_native_sectors;
4422 dev->n_sectors = n_sectors;
4423 fail:
4424 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4425 return rc;
4426 }
4427
4428 struct ata_blacklist_entry {
4429 const char *model_num;
4430 const char *model_rev;
4431 unsigned long horkage;
4432 };
4433
4434 static const struct ata_blacklist_entry ata_device_blacklist[] = {
4435 /* Devices with DMA related problems under Linux */
4436 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4437 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4438 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4439 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4440 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4441 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4442 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4443 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4444 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4445 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
4446 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4447 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4448 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4449 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4450 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4451 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
4452 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4453 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4454 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4455 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4456 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4457 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4458 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4459 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4460 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4461 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4462 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4463 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4464 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4465 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
4466 /* Odd clown on sil3726/4726 PMPs */
4467 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4468
4469 /* Weird ATAPI devices */
4470 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4471 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
4472 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4473 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4474
4475 /*
4476 * Causes silent data corruption with higher max sects.
4477 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4478 */
4479 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4480
4481 /*
4482 * These devices time out with higher max sects.
4483 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4484 */
4485 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4486 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4487
4488 /* Devices we expect to fail diagnostics */
4489
4490 /* Devices where NCQ should be avoided */
4491 /* NCQ is slow */
4492 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4493 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4494 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4495 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4496 /* NCQ is broken */
4497 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4498 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4499 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4500 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4501 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
4502
4503 /* Seagate NCQ + FLUSH CACHE firmware bug */
4504 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4505 ATA_HORKAGE_FIRMWARE_WARN },
4506
4507 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4508 ATA_HORKAGE_FIRMWARE_WARN },
4509
4510 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4511 ATA_HORKAGE_FIRMWARE_WARN },
4512
4513 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4514 ATA_HORKAGE_FIRMWARE_WARN },
4515
4516 /* drives which fail FPDMA_AA activation (some may freeze afterwards);
4517 the ST disks also have LPM issues */
4518 { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
4519 ATA_HORKAGE_NOLPM, },
4520 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
4521
4522 /* Blacklist entries taken from Silicon Image 3124/3132
4523 Windows driver .inf file - also several Linux problem reports */
4524 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4525 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4526 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4527
4528 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4529 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4530
4531 /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
4532 SD7SN6S256G and SD8SN8U256G */
4533 { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
4534
4535 /* devices which puke on READ_NATIVE_MAX */
4536 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4537 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4538 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4539 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4540
4541 /* this one allows HPA unlocking but fails IOs on the area */
4542 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
4543
4544 /* Devices which report 1 sector over size HPA */
4545 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4546 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4547 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4548
4549 /* Devices which get the IVB wrong */
4550 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4551 /* Maybe we should just blacklist TSSTcorp... */
4552 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
4553
4554 /* Devices that do not need bridging limits applied */
4555 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
4556 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
4557
4558 /* Devices which aren't very happy with higher link speeds */
4559 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
4560 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
4561
4562 /*
4563 * Devices which choke on SETXFER. Applies only if both the
4564 * device and controller are SATA.
4565 */
4566 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
4567 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
4568 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
4569 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4570 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4571
4572 /* Crucial BX100 SSD 500GB has broken LPM support */
4573 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
4574
4575 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4576 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4577 ATA_HORKAGE_ZERO_AFTER_TRIM |
4578 ATA_HORKAGE_NOLPM, },
4579 /* 512GB MX100 with newer firmware has only LPM issues */
4580 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
4581 ATA_HORKAGE_NOLPM, },
4582
4583 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4584 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4585 ATA_HORKAGE_ZERO_AFTER_TRIM |
4586 ATA_HORKAGE_NOLPM, },
4587 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4588 ATA_HORKAGE_ZERO_AFTER_TRIM |
4589 ATA_HORKAGE_NOLPM, },
4590
4591 /* These specific Samsung models/firmware-revs do not handle LPM well */
4592 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
4593 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
4594 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
4595 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
4596
4597 /* devices that don't properly handle queued TRIM commands */
4598 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4599 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4600 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4601 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4602 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4603 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4604 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4605 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4606 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4607 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4608 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4609 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4610 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4611 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4612 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4613 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4614 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4615 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4616
4617 /* devices that don't properly handle TRIM commands */
4618 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4619
4620 /*
4621 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4622 * (Return Zero After Trim) flags in the ATA Command Set are
4623 * unreliable in the sense that they only define what happens if
4624 * the device successfully executed the DSM TRIM command. TRIM
4625 * is only advisory, however, and the device is free to silently
4626 * ignore all or parts of the request.
4627 *
4628 * Whitelist drives that are known to reliably return zeroes
4629 * after TRIM.
4630 */
4631
4632 /*
4633 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4634 * that model before whitelisting all other intel SSDs.
4635 */
4636 { "INTEL*SSDSC2MH*", NULL, 0, },
4637
4638 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4639 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4640 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4641 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4642 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4643 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4644 { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4645 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
4646
4647 /*
4648 * Some WD SATA-I drives spin up and down erratically when the link
4649 * is put into the slumber mode. We don't have full list of the
4650 * affected devices. Disable LPM if the device matches one of the
4651 * known prefixes and is SATA-1. As a side effect LPM partial is
4652 * lost too.
4653 *
4654 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4655 */
4656 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4657 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4658 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4659 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4660 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4661 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4662 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4663
4664 /* End Marker */
4665 { }
4666 };
4667
4668 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4669 {
4670 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4671 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4672 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4673
4674 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4675 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4676
4677 while (ad->model_num) {
4678 if (glob_match(ad->model_num, model_num)) {
4679 if (ad->model_rev == NULL)
4680 return ad->horkage;
4681 if (glob_match(ad->model_rev, model_rev))
4682 return ad->horkage;
4683 }
4684 ad++;
4685 }
4686 return 0;
4687 }
4688
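/*
 * An illustrative note on the table's patterns: glob_match() supports
 * '*', '?' and '[]' classes, so one entry can cover several models.
 * A sketch of how two of the entries above would match:
 */
static bool example_blacklist_globs(void)
{
	return glob_match("ST3320[68]13AS", "ST3320613AS") &&
	       glob_match("SanDisk SD[78]SN*G", "SanDisk SD7SN6S256G");
}
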
4689 static int ata_dma_blacklisted(const struct ata_device *dev)
4690 {
4691 /* We don't support polling DMA.
4692 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
4693 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4694 */
4695 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4696 (dev->flags & ATA_DFLAG_CDB_INTR))
4697 return 1;
4698 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4699 }
4700
4701 /**
4702 * ata_is_40wire - check drive side detection
4703 * @dev: device
4704 *
4705 * Perform drive side detection decoding, allowing for device vendors
4706 * who can't follow the documentation.
4707 */
4708
4709 static int ata_is_40wire(struct ata_device *dev)
4710 {
4711 if (dev->horkage & ATA_HORKAGE_IVB)
4712 return ata_drive_40wire_relaxed(dev->id);
4713 return ata_drive_40wire(dev->id);
4714 }
4715
4716 /**
4717 * cable_is_40wire - 40/80/SATA decider
4718 * @ap: port to consider
4719 *
4720 * This function encapsulates the policy for speed management
4721 * in one place. At the moment we don't cache the result but
4722 * there is a good case for setting ap->cbl to the result when
4723 * we are called with unknown cables (and figuring out if it
4724 * impacts hotplug at all).
4725 *
4726 * Return 1 if the cable appears to be 40 wire.
4727 */
4728
4729 static int cable_is_40wire(struct ata_port *ap)
4730 {
4731 struct ata_link *link;
4732 struct ata_device *dev;
4733
4734 /* If the controller thinks we are 40 wire, we are. */
4735 if (ap->cbl == ATA_CBL_PATA40)
4736 return 1;
4737
4738 /* If the controller thinks we are 80 wire, we are. */
4739 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4740 return 0;
4741
4742 /* If the system is known to use a short 40 wire cable (e.g.
4743 * a laptop), then we allow 80 wire modes even if the drive
4744 * isn't sure.
4745 */
4746 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4747 return 0;
4748
4749 /* If the controller doesn't know, we scan.
4750 *
4751 * Note: We look for all 40 wire detects at this point. Any
4752 * 80 wire detect is taken to be 80 wire cable because
4753 * - in many setups only the one drive (slave if present) will
4754 * give a valid detect
4755 * - if you have a non detect capable drive you don't want it
4756 * to colour the choice
4757 */
4758 ata_for_each_link(link, ap, EDGE) {
4759 ata_for_each_dev(dev, link, ENABLED) {
4760 if (!ata_is_40wire(dev))
4761 return 0;
4762 }
4763 }
4764 return 1;
4765 }
4766
4767 /**
4768 * ata_dev_xfermask - Compute supported xfermask of the given device
4769 * @dev: Device to compute xfermask for
4770 *
4771 * Compute supported xfermask of @dev and store it in
4772 * dev->*_mask. This function is responsible for applying all
4773 * known limits including host controller limits, device
4774 * blacklist, etc...
4775 *
4776 * LOCKING:
4777 * None.
4778 */
4779 static void ata_dev_xfermask(struct ata_device *dev)
4780 {
4781 struct ata_link *link = dev->link;
4782 struct ata_port *ap = link->ap;
4783 struct ata_host *host = ap->host;
4784 unsigned long xfer_mask;
4785
4786 /* controller modes available */
4787 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4788 ap->mwdma_mask, ap->udma_mask);
4789
4790 /* drive modes available */
4791 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4792 dev->mwdma_mask, dev->udma_mask);
4793 xfer_mask &= ata_id_xfermask(dev->id);
4794
4795 /*
4796 * CFA Advanced TrueIDE timings are not allowed on a shared
4797 * cable
4798 */
4799 if (ata_dev_pair(dev)) {
4800 /* No PIO5 or PIO6 */
4801 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4802 /* No MWDMA3 or MWDMA 4 */
4803 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4804 }
4805
4806 if (ata_dma_blacklisted(dev)) {
4807 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4808 ata_dev_warn(dev,
4809 "device is on DMA blacklist, disabling DMA\n");
4810 }
4811
4812 if ((host->flags & ATA_HOST_SIMPLEX) &&
4813 host->simplex_claimed && host->simplex_claimed != ap) {
4814 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4815 ata_dev_warn(dev,
4816 "simplex DMA is claimed by other device, disabling DMA\n");
4817 }
4818
4819 if (ap->flags & ATA_FLAG_NO_IORDY)
4820 xfer_mask &= ata_pio_mask_no_iordy(dev);
4821
4822 if (ap->ops->mode_filter)
4823 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4824
4825 /* Apply cable rule here. Don't apply it early because when
4826 * we handle hot plug the cable type can itself change.
4827 * Check this last so that we know if the transfer rate was
4828 * solely limited by the cable.
4829 * Unknown or 80 wire cables reported host side are checked
4830 * drive side as well. Cases where we know a 40wire cable
4831 * is used safely for 80 are not checked here.
4832 */
4833 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4834 /* UDMA/44 or higher would be available */
4835 if (cable_is_40wire(ap)) {
4836 ata_dev_warn(dev,
4837 "limited to UDMA/33 due to 40-wire cable\n");
4838 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4839 }
4840
4841 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4842 &dev->mwdma_mask, &dev->udma_mask);
4843 }
4844
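/*
 * A small sketch of the packed xfer_mask layout the function above
 * relies on: PIO, MWDMA and UDMA capability bits share one unsigned
 * long at ATA_SHIFT_PIO/ATA_SHIFT_MWDMA/ATA_SHIFT_UDMA, and the
 * 40-wire clamp simply clears the UDMA bits above UDMA/33 (mode 2).
 */
static void example_xfermask_layout(struct ata_device *dev)
{
	unsigned long mask = ata_pack_xfermask(dev->pio_mask,
					       dev->mwdma_mask,
					       dev->udma_mask);

	/* drop UDMA/44 and above, exactly as the cable rule does */
	mask &= ~(0xF8 << ATA_SHIFT_UDMA);

	ata_unpack_xfermask(mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
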
4845 /**
4846 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4847 * @dev: Device to which command will be sent
4848 *
4849 * Issue SET FEATURES - XFER MODE command to device @dev
4850 * to set its transfer mode.
4851 *
4852 * LOCKING:
4853 * PCI/etc. bus probe sem.
4854 *
4855 * RETURNS:
4856 * 0 on success, AC_ERR_* mask otherwise.
4857 */
4858
4859 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4860 {
4861 struct ata_taskfile tf;
4862 unsigned int err_mask;
4863
4864 /* set up set-features taskfile */
4865 DPRINTK("set features - xfer mode\n");
4866
4867 /* Some controllers and ATAPI devices show flaky interrupt
4868 * behavior after setting xfer mode. Use polling instead.
4869 */
4870 ata_tf_init(dev, &tf);
4871 tf.command = ATA_CMD_SET_FEATURES;
4872 tf.feature = SETFEATURES_XFER;
4873 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4874 tf.protocol = ATA_PROT_NODATA;
4875 /* If we are using IORDY we must send the mode setting command */
4876 if (ata_pio_need_iordy(dev))
4877 tf.nsect = dev->xfer_mode;
4878 /* If the device has IORDY and the controller does not - turn it off */
4879 else if (ata_id_has_iordy(dev->id))
4880 tf.nsect = 0x01;
4881 else /* In the ancient relic department - skip all of this */
4882 return 0;
4883
4884 /* On some disks, this command causes spin-up, so we need a longer timeout */
4885 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4886
4887 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4888 return err_mask;
4889 }
4890
4891 /**
4892 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4893 * @dev: Device to which command will be sent
4894 * @enable: Whether to enable or disable the feature
4895 * @feature: The feature to set, passed in the sector count field
4896 *
4897 * Issue SET FEATURES - SATA FEATURES command to device @dev
4898 * with the sector count set to @feature.
4899 *
4900 * LOCKING:
4901 * PCI/etc. bus probe sem.
4902 *
4903 * RETURNS:
4904 * 0 on success, AC_ERR_* mask otherwise.
4905 */
4906 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4907 {
4908 struct ata_taskfile tf;
4909 unsigned int err_mask;
4910 unsigned long timeout = 0;
4911
4912 /* set up set-features taskfile */
4913 DPRINTK("set features - SATA features\n");
4914
4915 ata_tf_init(dev, &tf);
4916 tf.command = ATA_CMD_SET_FEATURES;
4917 tf.feature = enable;
4918 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4919 tf.protocol = ATA_PROT_NODATA;
4920 tf.nsect = feature;
4921
4922 if (enable == SETFEATURES_SPINUP)
4923 timeout = ata_probe_timeout ?
4924 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4925 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4926
4927 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4928 return err_mask;
4929 }
4930 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4931
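/*
 * A hedged usage sketch: enable device-initiated power management
 * (DIPM). SETFEATURES_SATA_ENABLE and SATA_DIPM are the standard
 * subcommand/feature values from <linux/ata.h>.
 */
static unsigned int example_enable_dipm(struct ata_device *dev)
{
	/* returns 0 on success, AC_ERR_* mask otherwise */
	return ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_DIPM);
}
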
4932 /**
4933 * ata_dev_init_params - Issue INIT DEV PARAMS command
4934 * @dev: Device to which command will be sent
4935 * @heads: Number of heads (taskfile parameter)
4936 * @sectors: Number of sectors (taskfile parameter)
4937 *
4938 * LOCKING:
4939 * Kernel thread context (may sleep)
4940 *
4941 * RETURNS:
4942 * 0 on success, AC_ERR_* mask otherwise.
4943 */
4944 static unsigned int ata_dev_init_params(struct ata_device *dev,
4945 u16 heads, u16 sectors)
4946 {
4947 struct ata_taskfile tf;
4948 unsigned int err_mask;
4949
4950 /* Number of sectors per track 1-255. Number of heads 1-16 */
4951 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4952 return AC_ERR_INVALID;
4953
4954 /* set up init dev params taskfile */
4955 DPRINTK("init dev params\n");
4956
4957 ata_tf_init(dev, &tf);
4958 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4959 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4960 tf.protocol = ATA_PROT_NODATA;
4961 tf.nsect = sectors;
4962 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4963
4964 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4965 /* A clean abort indicates an original or just-out-of-spec drive
4966 and we should continue, as we issue the setup based on the
4967 working geometry reported by the drive */
4968 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4969 err_mask = 0;
4970
4971 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4972 return err_mask;
4973 }
4974
4975 /**
4976 * atapi_check_dma - Check whether ATAPI DMA can be supported
4977 * @qc: Metadata associated with taskfile to check
4978 *
4979 * Allow low-level driver to filter ATA PACKET commands, returning
4980 * a status indicating whether or not it is OK to use DMA for the
4981 * supplied PACKET command.
4982 *
4983 * LOCKING:
4984 * spin_lock_irqsave(host lock)
4985 *
4986 * RETURNS: 0 when ATAPI DMA can be used
4987 * nonzero otherwise
4988 */
4989 int atapi_check_dma(struct ata_queued_cmd *qc)
4990 {
4991 struct ata_port *ap = qc->ap;
4992
4993 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4994 * few ATAPI devices choke on such DMA requests.
4995 */
4996 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4997 unlikely(qc->nbytes & 15))
4998 return 1;
4999
5000 if (ap->ops->check_atapi_dma)
5001 return ap->ops->check_atapi_dma(qc);
5002
5003 return 0;
5004 }
5005
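/*
 * A hypothetical ->check_atapi_dma hook, as consulted above: an LLD
 * that cannot DMA certain ATAPI transfers vetoes them here (nonzero
 * means fall back to PIO). The 32-byte granularity is an invented
 * example restriction.
 */
static int example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return qc->nbytes & 31 ? 1 : 0;
}
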
5006 /**
5007 * ata_std_qc_defer - Check whether a qc needs to be deferred
5008 * @qc: ATA command in question
5009 *
5010 * Non-NCQ commands cannot run with any other command, NCQ or
5011 * not. As the upper layer only knows the queue depth, we are
5012 * responsible for maintaining exclusion. This function checks
5013 * whether a new command @qc can be issued.
5014 *
5015 * LOCKING:
5016 * spin_lock_irqsave(host lock)
5017 *
5018 * RETURNS:
5019 * ATA_DEFER_* if deferring is needed, 0 otherwise.
5020 */
5021 int ata_std_qc_defer(struct ata_queued_cmd *qc)
5022 {
5023 struct ata_link *link = qc->dev->link;
5024
5025 if (ata_is_ncq(qc->tf.protocol)) {
5026 if (!ata_tag_valid(link->active_tag))
5027 return 0;
5028 } else {
5029 if (!ata_tag_valid(link->active_tag) && !link->sactive)
5030 return 0;
5031 }
5032
5033 return ATA_DEFER_LINK;
5034 }
5035 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
5036
5037 enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
5038 {
5039 return AC_ERR_OK;
5040 }
5041 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5042
5043 /**
5044 * ata_sg_init - Associate command with scatter-gather table.
5045 * @qc: Command to be associated
5046 * @sg: Scatter-gather table.
5047 * @n_elem: Number of elements in s/g table.
5048 *
5049 * Initialize the data-related elements of queued_cmd @qc
5050 * to point to a scatter-gather table @sg, containing @n_elem
5051 * elements.
5052 *
5053 * LOCKING:
5054 * spin_lock_irqsave(host lock)
5055 */
5056 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
5057 unsigned int n_elem)
5058 {
5059 qc->sg = sg;
5060 qc->n_elem = n_elem;
5061 qc->cursg = qc->sg;
5062 }
5063
5064 #ifdef CONFIG_HAS_DMA
5065
5066 /**
5067 * ata_sg_clean - Unmap DMA memory associated with command
5068 * @qc: Command containing DMA memory to be released
5069 *
5070 * Unmap all mapped DMA memory associated with this command.
5071 *
5072 * LOCKING:
5073 * spin_lock_irqsave(host lock)
5074 */
5075 static void ata_sg_clean(struct ata_queued_cmd *qc)
5076 {
5077 struct ata_port *ap = qc->ap;
5078 struct scatterlist *sg = qc->sg;
5079 int dir = qc->dma_dir;
5080
5081 WARN_ON_ONCE(sg == NULL);
5082
5083 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
5084
5085 if (qc->n_elem)
5086 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
5087
5088 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5089 qc->sg = NULL;
5090 }
5091
5092 /**
5093 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
5094 * @qc: Command with scatter-gather table to be mapped.
5095 *
5096 * DMA-map the scatter-gather table associated with queued_cmd @qc.
5097 *
5098 * LOCKING:
5099 * spin_lock_irqsave(host lock)
5100 *
5101 * RETURNS:
5102 * Zero on success, negative on error.
5103 *
5104 */
5105 static int ata_sg_setup(struct ata_queued_cmd *qc)
5106 {
5107 struct ata_port *ap = qc->ap;
5108 unsigned int n_elem;
5109
5110 VPRINTK("ENTER, ata%u\n", ap->print_id);
5111
5112 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
5113 if (n_elem < 1)
5114 return -1;
5115
5116 VPRINTK("%d sg elements mapped\n", n_elem);
5117 qc->orig_n_elem = qc->n_elem;
5118 qc->n_elem = n_elem;
5119 qc->flags |= ATA_QCFLAG_DMAMAP;
5120
5121 return 0;
5122 }
5123
5124 #else /* !CONFIG_HAS_DMA */
5125
5126 static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
5127 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
5128
5129 #endif /* !CONFIG_HAS_DMA */
5130
5131 /**
5132 * swap_buf_le16 - swap halves of 16-bit words in place
5133 * @buf: Buffer to swap
5134 * @buf_words: Number of 16-bit words in buffer.
5135 *
5136 * Swap halves of 16-bit words if needed to convert from
5137 * little-endian byte order to native cpu byte order, or
5138 * vice-versa.
5139 *
5140 * LOCKING:
5141 * Inherited from caller.
5142 */
5143 void swap_buf_le16(u16 *buf, unsigned int buf_words)
5144 {
5145 #ifdef __BIG_ENDIAN
5146 unsigned int i;
5147
5148 for (i = 0; i < buf_words; i++)
5149 buf[i] = le16_to_cpu(buf[i]);
5150 #endif /* __BIG_ENDIAN */
5151 }
5152
5153 /**
5154 * ata_qc_new_init - Request an available ATA command, and initialize it
5155 * @dev: Device from whom we request an available command structure
5156 * @tag: tag
5157 *
5158 * LOCKING:
5159 * None.
5160 */
5161
5162 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
5163 {
5164 struct ata_port *ap = dev->link->ap;
5165 struct ata_queued_cmd *qc;
5166
5167 /* no command while frozen */
5168 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5169 return NULL;
5170
5171 /* libsas case */
5172 if (ap->flags & ATA_FLAG_SAS_HOST) {
5173 tag = ata_sas_allocate_tag(ap);
5174 if (tag < 0)
5175 return NULL;
5176 }
5177
5178 qc = __ata_qc_from_tag(ap, tag);
5179 qc->tag = qc->hw_tag = tag;
5180 qc->scsicmd = NULL;
5181 qc->ap = ap;
5182 qc->dev = dev;
5183
5184 ata_qc_reinit(qc);
5185
5186 return qc;
5187 }
5188
5189 /**
5190 * ata_qc_free - free unused ata_queued_cmd
5191 * @qc: Command to complete
5192 *
5193 * Designed to free an unused ata_queued_cmd object
5194 * in case something prevents it from being used.
5195 *
5196 * LOCKING:
5197 * spin_lock_irqsave(host lock)
5198 */
5199 void ata_qc_free(struct ata_queued_cmd *qc)
5200 {
5201 struct ata_port *ap;
5202 unsigned int tag;
5203
5204 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5205 ap = qc->ap;
5206
5207 qc->flags = 0;
5208 tag = qc->tag;
5209 if (ata_tag_valid(tag)) {
5210 qc->tag = ATA_TAG_POISON;
5211 if (ap->flags & ATA_FLAG_SAS_HOST)
5212 ata_sas_free_tag(tag, ap);
5213 }
5214 }
5215
5216 void __ata_qc_complete(struct ata_queued_cmd *qc)
5217 {
5218 struct ata_port *ap;
5219 struct ata_link *link;
5220
5221 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5222 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
5223 ap = qc->ap;
5224 link = qc->dev->link;
5225
5226 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5227 ata_sg_clean(qc);
5228
5229 /* command should be marked inactive atomically with qc completion */
5230 if (ata_is_ncq(qc->tf.protocol)) {
5231 link->sactive &= ~(1 << qc->hw_tag);
5232 if (!link->sactive)
5233 ap->nr_active_links--;
5234 } else {
5235 link->active_tag = ATA_TAG_POISON;
5236 ap->nr_active_links--;
5237 }
5238
5239 /* clear exclusive status */
5240 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5241 ap->excl_link == link))
5242 ap->excl_link = NULL;
5243
5244 /* atapi: mark qc as inactive to prevent the interrupt handler
5245 * from completing the command twice later, before the error handler
5246 * is called. (when rc != 0 and atapi request sense is needed)
5247 */
5248 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5249 ap->qc_active &= ~(1ULL << qc->tag);
5250
5251 /* call completion callback */
5252 qc->complete_fn(qc);
5253 }
5254
5255 static void fill_result_tf(struct ata_queued_cmd *qc)
5256 {
5257 struct ata_port *ap = qc->ap;
5258
5259 qc->result_tf.flags = qc->tf.flags;
5260 ap->ops->qc_fill_rtf(qc);
5261 }
5262
5263 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5264 {
5265 struct ata_device *dev = qc->dev;
5266
5267 if (!ata_is_data(qc->tf.protocol))
5268 return;
5269
5270 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5271 return;
5272
5273 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5274 }
5275
5276 /**
5277 * ata_qc_complete - Complete an active ATA command
5278 * @qc: Command to complete
5279 *
5280 * Indicate to the mid and upper layers that an ATA command has
5281 * completed, with either an ok or not-ok status.
5282 *
5283 * Refrain from calling this function multiple times when
5284 * successfully completing multiple NCQ commands.
5285 * ata_qc_complete_multiple() should be used instead, which will
5286 * properly update IRQ expect state.
5287 *
5288 * LOCKING:
5289 * spin_lock_irqsave(host lock)
5290 */
5291 void ata_qc_complete(struct ata_queued_cmd *qc)
5292 {
5293 struct ata_port *ap = qc->ap;
5294
5295 /* Trigger the LED (if available) */
5296 ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
5297
5298 /* XXX: New EH and old EH use different mechanisms to
5299 * synchronize EH with regular execution path.
5300 *
5301 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5302 * Normal execution path is responsible for not accessing a
5303 * failed qc. libata core enforces the rule by returning NULL
5304 * from ata_qc_from_tag() for failed qcs.
5305 *
5306 * Old EH depends on ata_qc_complete() nullifying completion
5307 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5308 * not synchronize with interrupt handler. Only PIO task is
5309 * taken care of.
5310 */
5311 if (ap->ops->error_handler) {
5312 struct ata_device *dev = qc->dev;
5313 struct ata_eh_info *ehi = &dev->link->eh_info;
5314
5315 if (unlikely(qc->err_mask))
5316 qc->flags |= ATA_QCFLAG_FAILED;
5317
5318 /*
5319 * Finish internal commands without any further processing
5320 * and always with the result TF filled.
5321 */
5322 if (unlikely(ata_tag_internal(qc->tag))) {
5323 fill_result_tf(qc);
5324 trace_ata_qc_complete_internal(qc);
5325 __ata_qc_complete(qc);
5326 return;
5327 }
5328
5329 /*
5330 * Non-internal qc has failed. Fill the result TF and
5331 * summon EH.
5332 */
5333 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5334 fill_result_tf(qc);
5335 trace_ata_qc_complete_failed(qc);
5336 ata_qc_schedule_eh(qc);
5337 return;
5338 }
5339
5340 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
5341
5342 /* read result TF if requested */
5343 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5344 fill_result_tf(qc);
5345
5346 trace_ata_qc_complete_done(qc);
5347 /* Some commands need post-processing after successful
5348 * completion.
5349 */
5350 switch (qc->tf.command) {
5351 case ATA_CMD_SET_FEATURES:
5352 if (qc->tf.feature != SETFEATURES_WC_ON &&
5353 qc->tf.feature != SETFEATURES_WC_OFF &&
5354 qc->tf.feature != SETFEATURES_RA_ON &&
5355 qc->tf.feature != SETFEATURES_RA_OFF)
5356 break;
5357 /* fall through */
5358 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5359 case ATA_CMD_SET_MULTI: /* multi_count changed */
5360 /* revalidate device */
5361 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5362 ata_port_schedule_eh(ap);
5363 break;
5364
5365 case ATA_CMD_SLEEP:
5366 dev->flags |= ATA_DFLAG_SLEEPING;
5367 break;
5368 }
5369
5370 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5371 ata_verify_xfer(qc);
5372
5373 __ata_qc_complete(qc);
5374 } else {
5375 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5376 return;
5377
5378 /* read result TF if failed or requested */
5379 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5380 fill_result_tf(qc);
5381
5382 __ata_qc_complete(qc);
5383 }
5384 }
5385 EXPORT_SYMBOL_GPL(ata_qc_complete);
5386
5387 /**
5388 * ata_qc_get_active - get bitmask of active qcs
5389 * @ap: port in question
5390 *
5391 * LOCKING:
5392 * spin_lock_irqsave(host lock)
5393 *
5394 * RETURNS:
5395 * Bitmask of active qcs
5396 */
5397 u64 ata_qc_get_active(struct ata_port *ap)
5398 {
5399 u64 qc_active = ap->qc_active;
5400
5401 /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
5402 if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
5403 qc_active |= (1 << 0);
5404 qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
5405 }
5406
5407 return qc_active;
5408 }
5409 EXPORT_SYMBOL_GPL(ata_qc_get_active);
5410
5411 /**
5412 * ata_qc_complete_multiple - Complete multiple qcs successfully
5413 * @ap: port in question
5414 * @qc_active: new qc_active mask
5415 *
5416 * Complete in-flight commands. This function is meant to be
5417 * called from the low-level driver's interrupt routine to
5418 * complete requests normally. ap->qc_active and @qc_active are
5419 * compared and commands are completed accordingly.
5420 *
5421 * Always use this function when completing multiple NCQ commands
5422 * from IRQ handlers instead of calling ata_qc_complete()
5423 * multiple times to keep IRQ expect status properly in sync.
5424 *
5425 * LOCKING:
5426 * spin_lock_irqsave(host lock)
5427 *
5428 * RETURNS:
5429 * Number of completed commands on success, -errno otherwise.
5430 */
5431 int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
5432 {
5433 u64 done_mask, ap_qc_active = ap->qc_active;
5434 int nr_done = 0;
5435
5436 /*
5437 * If the internal tag is set on ap->qc_active, then we care about
5438 * bit0 on the passed in qc_active mask. Move that bit up to match
5439 * the internal tag.
5440 */
5441 if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
5442 qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
5443 qc_active ^= qc_active & 0x01;
5444 }
5445
5446 done_mask = ap_qc_active ^ qc_active;
5447
5448 if (unlikely(done_mask & qc_active)) {
5449 ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
5450 ap->qc_active, qc_active);
5451 return -EINVAL;
5452 }
5453
5454 while (done_mask) {
5455 struct ata_queued_cmd *qc;
5456 unsigned int tag = __ffs64(done_mask);
5457
5458 qc = ata_qc_from_tag(ap, tag);
5459 if (qc) {
5460 ata_qc_complete(qc);
5461 nr_done++;
5462 }
5463 done_mask &= ~(1ULL << tag);
5464 }
5465
5466 return nr_done;
5467 }
5468 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
5469
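/*
 * A minimal interrupt-path sketch for a hypothetical NCQ controller,
 * called with the host lock held. example_read_active_tags() stands
 * in for reading the hardware's "still busy" tag bitmap (e.g. an
 * SActive register); returning ap->qc_active would mean "nothing has
 * completed yet".
 */
static u64 example_read_active_tags(struct ata_port *ap)
{
	return ap->qc_active;	/* a real driver reads its HBA here */
}

static int example_ncq_interrupt(struct ata_port *ap)
{
	/* completes every qc whose tag has left the hardware active set */
	return ata_qc_complete_multiple(ap, example_read_active_tags(ap));
}
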
5470 /**
5471 * ata_qc_issue - issue taskfile to device
5472 * @qc: command to issue to device
5473 *
5474 * Prepare an ATA command for submission to the device.
5475 * This includes mapping the data into a DMA-able
5476 * area, filling in the S/G table, and finally
5477 * writing the taskfile to hardware, starting the command.
5478 *
5479 * LOCKING:
5480 * spin_lock_irqsave(host lock)
5481 */
5482 void ata_qc_issue(struct ata_queued_cmd *qc)
5483 {
5484 struct ata_port *ap = qc->ap;
5485 struct ata_link *link = qc->dev->link;
5486 u8 prot = qc->tf.protocol;
5487
5488 /* Make sure only one non-NCQ command is outstanding. The
5489 * check is skipped for old EH because it reuses active qc to
5490 * request ATAPI sense.
5491 */
5492 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5493
5494 if (ata_is_ncq(prot)) {
5495 WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
5496
5497 if (!link->sactive)
5498 ap->nr_active_links++;
5499 link->sactive |= 1 << qc->hw_tag;
5500 } else {
5501 WARN_ON_ONCE(link->sactive);
5502
5503 ap->nr_active_links++;
5504 link->active_tag = qc->tag;
5505 }
5506
5507 qc->flags |= ATA_QCFLAG_ACTIVE;
5508 ap->qc_active |= 1ULL << qc->tag;
5509
5510 /*
5511 * We guarantee to LLDs that they will have at least one
5512 * non-zero sg if the command is a data command.
5513 */
5514 if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
5515 goto sys_err;
5516
5517 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5518 (ap->flags & ATA_FLAG_PIO_DMA)))
5519 if (ata_sg_setup(qc))
5520 goto sys_err;
5521
5522 /* if device is sleeping, schedule reset and abort the link */
5523 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5524 link->eh_info.action |= ATA_EH_RESET;
5525 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5526 ata_link_abort(link);
5527 return;
5528 }
5529
5530 qc->err_mask |= ap->ops->qc_prep(qc);
5531 if (unlikely(qc->err_mask))
5532 goto err;
5533 trace_ata_qc_issue(qc);
5534 qc->err_mask |= ap->ops->qc_issue(qc);
5535 if (unlikely(qc->err_mask))
5536 goto err;
5537 return;
5538
5539 sys_err:
5540 qc->err_mask |= AC_ERR_SYSTEM;
5541 err:
5542 ata_qc_complete(qc);
5543 }
5544
5545 /**
5546 * sata_scr_valid - test whether SCRs are accessible
5547 * @link: ATA link to test SCR accessibility for
5548 *
5549 * Test whether SCRs are accessible for @link.
5550 *
5551 * LOCKING:
5552 * None.
5553 *
5554 * RETURNS:
5555 * 1 if SCRs are accessible, 0 otherwise.
5556 */
5557 int sata_scr_valid(struct ata_link *link)
5558 {
5559 struct ata_port *ap = link->ap;
5560
5561 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5562 }
5563 EXPORT_SYMBOL_GPL(sata_scr_valid);
5564
5565 /**
5566 * sata_scr_read - read SCR register of the specified port
5567 * @link: ATA link to read SCR for
5568 * @reg: SCR to read
5569 * @val: Place to store read value
5570 *
5571 * Read SCR register @reg of @link into *@val. This function is
5572 * guaranteed to succeed if @link is ap->link, the cable type of
5573 * the port is SATA and the port implements ->scr_read.
5574 *
5575 * LOCKING:
5576 * None if @link is ap->link. Kernel thread context otherwise.
5577 *
5578 * RETURNS:
5579 * 0 on success, negative errno on failure.
5580 */
5581 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5582 {
5583 if (ata_is_host_link(link)) {
5584 if (sata_scr_valid(link))
5585 return link->ap->ops->scr_read(link, reg, val);
5586 return -EOPNOTSUPP;
5587 }
5588
5589 return sata_pmp_scr_read(link, reg, val);
5590 }
5591 EXPORT_SYMBOL_GPL(sata_scr_read);
5592
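/*
 * A small sketch decoding SStatus after sata_scr_read(): DET lives in
 * bits 3:0 (3 means device present and phy communication established)
 * and the negotiated speed in bits 7:4. This mirrors what the
 * ata_sstatus_online() helper checks.
 */
static bool example_link_established(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return false;
	return (sstatus & 0xf) == 0x3;
}
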
5593 /**
5594 * sata_scr_write - write SCR register of the specified port
5595 * @link: ATA link to write SCR for
5596 * @reg: SCR to write
5597 * @val: value to write
5598 *
5599 * Write @val to SCR register @reg of @link. This function is
5600 * guaranteed to succeed if @link is ap->link, the cable type of
5601 * the port is SATA and the port implements ->scr_write.
5602 *
5603 * LOCKING:
5604 * None if @link is ap->link. Kernel thread context otherwise.
5605 *
5606 * RETURNS:
5607 * 0 on success, negative errno on failure.
5608 */
5609 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5610 {
5611 if (ata_is_host_link(link)) {
5612 if (sata_scr_valid(link))
5613 return link->ap->ops->scr_write(link, reg, val);
5614 return -EOPNOTSUPP;
5615 }
5616
5617 return sata_pmp_scr_write(link, reg, val);
5618 }
5619 EXPORT_SYMBOL_GPL(sata_scr_write);
5620
5621 /**
5622 * sata_scr_write_flush - write SCR register of the specified port and flush
5623 * @link: ATA link to write SCR for
5624 * @reg: SCR to write
5625 * @val: value to write
5626 *
5627 * This function is identical to sata_scr_write() except that this
5628 * function performs a flush after writing to the register.
5629 *
5630 * LOCKING:
5631 * None if @link is ap->link. Kernel thread context otherwise.
5632 *
5633 * RETURNS:
5634 * 0 on success, negative errno on failure.
5635 */
5636 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5637 {
5638 if (ata_is_host_link(link)) {
5639 int rc;
5640
5641 if (sata_scr_valid(link)) {
5642 rc = link->ap->ops->scr_write(link, reg, val);
5643 if (rc == 0)
5644 rc = link->ap->ops->scr_read(link, reg, &val);
5645 return rc;
5646 }
5647 return -EOPNOTSUPP;
5648 }
5649
5650 return sata_pmp_scr_write(link, reg, val);
5651 }
5652 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
5653
5654 /**
5655 * ata_phys_link_online - test whether the given link is online
5656 * @link: ATA link to test
5657 *
5658 * Test whether @link is online. Note that this function returns
5659 * 0 if online status of @link cannot be obtained, so
5660 * ata_phys_link_online(link) != !ata_phys_link_offline(link).
5661 *
5662 * LOCKING:
5663 * None.
5664 *
5665 * RETURNS:
5666 * True if the port online status is available and online.
5667 */
5668 bool ata_phys_link_online(struct ata_link *link)
5669 {
5670 u32 sstatus;
5671
5672 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5673 ata_sstatus_online(sstatus))
5674 return true;
5675 return false;
5676 }
5677
5678 /**
5679 * ata_phys_link_offline - test whether the given link is offline
5680 * @link: ATA link to test
5681 *
5682 * Test whether @link is offline. Note that this function
5683 * returns 0 if offline status of @link cannot be obtained, so
5684 * ata_phys_link_online(link) != !ata_phys_link_offline(link).
5685 *
5686 * LOCKING:
5687 * None.
5688 *
5689 * RETURNS:
5690 * True if the port offline status is available and offline.
5691 */
5692 bool ata_phys_link_offline(struct ata_link *link)
5693 {
5694 u32 sstatus;
5695
5696 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5697 !ata_sstatus_online(sstatus))
5698 return true;
5699 return false;
5700 }
5701
5702 /**
5703 * ata_link_online - test whether the given link is online
5704 * @link: ATA link to test
5705 *
5706 * Test whether @link is online. This is identical to
5707 * ata_phys_link_online() when there's no slave link. When
5708 * there's a slave link, this function should only be called on
5709 * the master link and will return true if any of M/S links is
5710 * online.
5711 *
5712 * LOCKING:
5713 * None.
5714 *
5715 * RETURNS:
5716 * True if the port online status is available and online.
5717 */
5718 bool ata_link_online(struct ata_link *link)
5719 {
5720 struct ata_link *slave = link->ap->slave_link;
5721
5722 WARN_ON(link == slave); /* shouldn't be called on slave link */
5723
5724 return ata_phys_link_online(link) ||
5725 (slave && ata_phys_link_online(slave));
5726 }
5727 EXPORT_SYMBOL_GPL(ata_link_online);
5728
5729 /**
5730 * ata_link_offline - test whether the given link is offline
5731 * @link: ATA link to test
5732 *
5733 * Test whether @link is offline. This is identical to
5734 * ata_phys_link_offline() when there's no slave link. When
5735 * there's a slave link, this function should only be called on
5736 * the master link and will return true if both M/S links are
5737 * offline.
5738 *
5739 * LOCKING:
5740 * None.
5741 *
5742 * RETURNS:
5743 * True if the port offline status is available and offline.
5744 */
5745 bool ata_link_offline(struct ata_link *link)
5746 {
5747 struct ata_link *slave = link->ap->slave_link;
5748
5749 WARN_ON(link == slave); /* shouldn't be called on slave link */
5750
5751 return ata_phys_link_offline(link) &&
5752 (!slave || ata_phys_link_offline(slave));
5753 }
5754 EXPORT_SYMBOL_GPL(ata_link_offline);
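
/*
 * Editor's note with a sketch: because the online status may be
 * unobtainable, ata_link_online() and ata_link_offline() are not
 * complements - a link can be neither. A hypothetical state dump
 * therefore needs three branches:
 *
 *	static const char *my_link_state(struct ata_link *link)
 *	{
 *		if (ata_link_online(link))
 *			return "online";
 *		if (ata_link_offline(link))
 *			return "offline";
 *		return "unknown";	- SCR access failed
 *	}
 */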
5755
5756 #ifdef CONFIG_PM
5757 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5758 unsigned int action, unsigned int ehi_flags,
5759 bool async)
5760 {
5761 struct ata_link *link;
5762 unsigned long flags;
5763
5764 /* A previous resume operation might still be in
5765 * progress. Wait for ATA_PFLAG_PM_PENDING to clear.
5766 */
5767 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5768 ata_port_wait_eh(ap);
5769 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5770 }
5771
5772 /* request PM ops to EH */
5773 spin_lock_irqsave(ap->lock, flags);
5774
5775 ap->pm_mesg = mesg;
5776 ap->pflags |= ATA_PFLAG_PM_PENDING;
5777 ata_for_each_link(link, ap, HOST_FIRST) {
5778 link->eh_info.action |= action;
5779 link->eh_info.flags |= ehi_flags;
5780 }
5781
5782 ata_port_schedule_eh(ap);
5783
5784 spin_unlock_irqrestore(ap->lock, flags);
5785
5786 if (!async) {
5787 ata_port_wait_eh(ap);
5788 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5789 }
5790 }
5791
5792 /*
5793 * On some hardware, a device fails to respond after being spun down for
5794 * suspend. As the device won't be used before resume, we don't need to
5795 * touch it. Ask EH to skip the usual probing and proceed directly to suspend.
5796 *
5797 * http://thread.gmane.org/gmane.linux.ide/46764
5798 */
5799 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
5800 | ATA_EHI_NO_AUTOPSY
5801 | ATA_EHI_NO_RECOVERY;
5802
5803 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
5804 {
5805 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
5806 }
5807
5808 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
5809 {
5810 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
5811 }
5812
5813 static int ata_port_pm_suspend(struct device *dev)
5814 {
5815 struct ata_port *ap = to_ata_port(dev);
5816
5817 if (pm_runtime_suspended(dev))
5818 return 0;
5819
5820 ata_port_suspend(ap, PMSG_SUSPEND);
5821 return 0;
5822 }
5823
5824 static int ata_port_pm_freeze(struct device *dev)
5825 {
5826 struct ata_port *ap = to_ata_port(dev);
5827
5828 if (pm_runtime_suspended(dev))
5829 return 0;
5830
5831 ata_port_suspend(ap, PMSG_FREEZE);
5832 return 0;
5833 }
5834
5835 static int ata_port_pm_poweroff(struct device *dev)
5836 {
5837 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5838 return 0;
5839 }
5840
5841 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5842 | ATA_EHI_QUIET;
5843
5844 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5845 {
5846 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5847 }
5848
5849 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5850 {
5851 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5852 }
5853
5854 static int ata_port_pm_resume(struct device *dev)
5855 {
5856 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5857 pm_runtime_disable(dev);
5858 pm_runtime_set_active(dev);
5859 pm_runtime_enable(dev);
5860 return 0;
5861 }
5862
5863 /*
5864 * For ODDs, the upper layer will poll for media change every few seconds,
5865 * which will make the device enter and leave suspend state every few
5866 * seconds. As each suspend causes a hard/soft reset, the gain from runtime
5867 * suspend is very small and the ODD may malfunction after constantly being
5868 * reset. So the idle callback here will not proceed to suspend if a
5869 * non-ZPODD capable ODD is attached to the port.
5870 */
5871 static int ata_port_runtime_idle(struct device *dev)
5872 {
5873 struct ata_port *ap = to_ata_port(dev);
5874 struct ata_link *link;
5875 struct ata_device *adev;
5876
5877 ata_for_each_link(link, ap, HOST_FIRST) {
5878 ata_for_each_dev(adev, link, ENABLED)
5879 if (adev->class == ATA_DEV_ATAPI &&
5880 !zpodd_dev_enabled(adev))
5881 return -EBUSY;
5882 }
5883
5884 return 0;
5885 }
5886
5887 static int ata_port_runtime_suspend(struct device *dev)
5888 {
5889 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5890 return 0;
5891 }
5892
5893 static int ata_port_runtime_resume(struct device *dev)
5894 {
5895 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5896 return 0;
5897 }
5898
5899 static const struct dev_pm_ops ata_port_pm_ops = {
5900 .suspend = ata_port_pm_suspend,
5901 .resume = ata_port_pm_resume,
5902 .freeze = ata_port_pm_freeze,
5903 .thaw = ata_port_pm_resume,
5904 .poweroff = ata_port_pm_poweroff,
5905 .restore = ata_port_pm_resume,
5906
5907 .runtime_suspend = ata_port_runtime_suspend,
5908 .runtime_resume = ata_port_runtime_resume,
5909 .runtime_idle = ata_port_runtime_idle,
5910 };
5911
5912 /* SAS ports don't participate in runtime PM of ata_ports; they need to
5913 * resume ATA devices at the domain level, not the per-port level. SAS
5914 * suspend/resume is async to allow parallel port recovery since SAS has
5915 * multiple ata_port instances per Scsi_Host.
5916 */
5917 void ata_sas_port_suspend(struct ata_port *ap)
5918 {
5919 ata_port_suspend_async(ap, PMSG_SUSPEND);
5920 }
5921 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5922
5923 void ata_sas_port_resume(struct ata_port *ap)
5924 {
5925 ata_port_resume_async(ap, PMSG_RESUME);
5926 }
5927 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5928
5929 /**
5930 * ata_host_suspend - suspend host
5931 * @host: host to suspend
5932 * @mesg: PM message
5933 *
5934 * Suspend @host. Actual operation is performed by port suspend.
5935 */
5936 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5937 {
5938 host->dev->power.power_state = mesg;
5939 return 0;
5940 }
5941 EXPORT_SYMBOL_GPL(ata_host_suspend);
5942
5943 /**
5944 * ata_host_resume - resume host
5945 * @host: host to resume
5946 *
5947 * Resume @host. Actual operation is performed by port resume.
5948 */
5949 void ata_host_resume(struct ata_host *host)
5950 {
5951 host->dev->power.power_state = PMSG_ON;
5952 }
5953 EXPORT_SYMBOL_GPL(ata_host_resume);
5954 #endif
5955
5956 const struct device_type ata_port_type = {
5957 .name = "ata_port",
5958 #ifdef CONFIG_PM
5959 .pm = &ata_port_pm_ops,
5960 #endif
5961 };
5962
5963 /**
5964 * ata_dev_init - Initialize an ata_device structure
5965 * @dev: Device structure to initialize
5966 *
5967 * Initialize @dev in preparation for probing.
5968 *
5969 * LOCKING:
5970 * Inherited from caller.
5971 */
5972 void ata_dev_init(struct ata_device *dev)
5973 {
5974 struct ata_link *link = ata_dev_phys_link(dev);
5975 struct ata_port *ap = link->ap;
5976 unsigned long flags;
5977
5978 /* SATA spd limit is bound to the attached device, reset together */
5979 link->sata_spd_limit = link->hw_sata_spd_limit;
5980 link->sata_spd = 0;
5981
5982 /* High bits of dev->flags are used to record warm plug
5983 * requests which occur asynchronously. Synchronize using
5984 * host lock.
5985 */
5986 spin_lock_irqsave(ap->lock, flags);
5987 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5988 dev->horkage = 0;
5989 spin_unlock_irqrestore(ap->lock, flags);
5990
5991 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5992 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5993 dev->pio_mask = UINT_MAX;
5994 dev->mwdma_mask = UINT_MAX;
5995 dev->udma_mask = UINT_MAX;
5996 }
5997
5998 /**
5999 * ata_link_init - Initialize an ata_link structure
6000 * @ap: ATA port link is attached to
6001 * @link: Link structure to initialize
6002 * @pmp: Port multiplier port number
6003 *
6004 * Initialize @link.
6005 *
6006 * LOCKING:
6007 * Kernel thread context (may sleep)
6008 */
6009 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6010 {
6011 int i;
6012
6013 /* clear everything except for devices */
6014 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
6015 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
6016
6017 link->ap = ap;
6018 link->pmp = pmp;
6019 link->active_tag = ATA_TAG_POISON;
6020 link->hw_sata_spd_limit = UINT_MAX;
6021
6022 /* can't use iterator, ap isn't initialized yet */
6023 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6024 struct ata_device *dev = &link->device[i];
6025
6026 dev->link = link;
6027 dev->devno = dev - link->device;
6028 #ifdef CONFIG_ATA_ACPI
6029 dev->gtf_filter = ata_acpi_gtf_filter;
6030 #endif
6031 ata_dev_init(dev);
6032 }
6033 }
6034
6035 /**
6036 * sata_link_init_spd - Initialize link->sata_spd_limit
6037 * @link: Link to configure sata_spd_limit for
6038 *
6039 * Initialize @link->[hw_]sata_spd_limit to the currently
6040 * configured value.
6041 *
6042 * LOCKING:
6043 * Kernel thread context (may sleep).
6044 *
6045 * RETURNS:
6046 * 0 on success, -errno on failure.
6047 */
6048 int sata_link_init_spd(struct ata_link *link)
6049 {
6050 u8 spd;
6051 int rc;
6052
6053 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
6054 if (rc)
6055 return rc;
6056
6057 spd = (link->saved_scontrol >> 4) & 0xf;
6058 if (spd)
6059 link->hw_sata_spd_limit &= (1 << spd) - 1;
6060
6061 ata_force_link_limits(link);
6062
6063 link->sata_spd_limit = link->hw_sata_spd_limit;
6064
6065 return 0;
6066 }
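
/*
 * Editor's note: the SPD field of SControl (bits 7:4) holds the highest
 * allowed generation as a number, while *sata_spd_limit is a bitmask
 * with bit N-1 set for each allowed generation N. E.g. an SPD of 2
 * (limit to 3.0 Gbps) gives (1 << 2) - 1 = 0x3, allowing both 1.5 Gbps
 * and 3.0 Gbps; an SPD of 0 means "no restriction" and the mask is
 * left untouched.
 */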
6067
6068 /**
6069 * ata_port_alloc - allocate and initialize basic ATA port resources
6070 * @host: ATA host this allocated port belongs to
6071 *
6072 * Allocate and initialize basic ATA port resources.
6073 *
6074 * RETURNS:
6075 * Allocated ATA port on success, NULL on failure.
6076 *
6077 * LOCKING:
6078 * Inherited from calling layer (may sleep).
6079 */
6080 struct ata_port *ata_port_alloc(struct ata_host *host)
6081 {
6082 struct ata_port *ap;
6083
6084 DPRINTK("ENTER\n");
6085
6086 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6087 if (!ap)
6088 return NULL;
6089
6090 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
6091 ap->lock = &host->lock;
6092 ap->print_id = -1;
6093 ap->local_port_no = -1;
6094 ap->host = host;
6095 ap->dev = host->dev;
6096
6097 #if defined(ATA_VERBOSE_DEBUG)
6098 /* turn on all debugging levels */
6099 ap->msg_enable = 0x00FF;
6100 #elif defined(ATA_DEBUG)
6101 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL |
6102 ATA_MSG_WARN | ATA_MSG_ERR;
6102 #else
6103 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6104 #endif
6105
6106 mutex_init(&ap->scsi_scan_mutex);
6107 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6108 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6109 INIT_LIST_HEAD(&ap->eh_done_q);
6110 init_waitqueue_head(&ap->eh_wait_q);
6111 init_completion(&ap->park_req_pending);
6112 timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
6113 TIMER_DEFERRABLE);
6114
6115 ap->cbl = ATA_CBL_NONE;
6116
6117 ata_link_init(ap, &ap->link, 0);
6118
6119 #ifdef ATA_IRQ_TRAP
6120 ap->stats.unhandled_irq = 1;
6121 ap->stats.idle_irq = 1;
6122 #endif
6123 ata_sff_port_init(ap);
6124
6125 return ap;
6126 }
6127
6128 static void ata_devres_release(struct device *gendev, void *res)
6129 {
6130 struct ata_host *host = dev_get_drvdata(gendev);
6131 int i;
6132
6133 for (i = 0; i < host->n_ports; i++) {
6134 struct ata_port *ap = host->ports[i];
6135
6136 if (!ap)
6137 continue;
6138
6139 if (ap->scsi_host)
6140 scsi_host_put(ap->scsi_host);
6142 }
6143
6144 dev_set_drvdata(gendev, NULL);
6145 ata_host_put(host);
6146 }
6147
6148 static void ata_host_release(struct kref *kref)
6149 {
6150 struct ata_host *host = container_of(kref, struct ata_host, kref);
6151 int i;
6152
6153 for (i = 0; i < host->n_ports; i++) {
6154 struct ata_port *ap = host->ports[i];
6155
6156 kfree(ap->pmp_link);
6157 kfree(ap->slave_link);
6158 kfree(ap);
6159 host->ports[i] = NULL;
6160 }
6161 kfree(host);
6162 }
6163
6164 void ata_host_get(struct ata_host *host)
6165 {
6166 kref_get(&host->kref);
6167 }
6168
6169 void ata_host_put(struct ata_host *host)
6170 {
6171 kref_put(&host->kref, ata_host_release);
6172 }
6173 EXPORT_SYMBOL_GPL(ata_host_put);
6174
6175 /**
6176 * ata_host_alloc - allocate and init basic ATA host resources
6177 * @dev: generic device this host is associated with
6178 * @max_ports: maximum number of ATA ports associated with this host
6179 *
6180 * Allocate and initialize basic ATA host resources. An LLD calls
6181 * this function to allocate a host, initializes it fully, and
6182 * attaches it using ata_host_register().
6183 *
6184 * @max_ports ports are allocated and host->n_ports is
6185 * initialized to @max_ports. The caller is allowed to decrease
6186 * host->n_ports before calling ata_host_register(). The unused
6187 * ports will be automatically freed on registration.
6188 *
6189 * RETURNS:
6190 * Allocated ATA host on success, NULL on failure.
6191 *
6192 * LOCKING:
6193 * Inherited from calling layer (may sleep).
6194 */
6195 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6196 {
6197 struct ata_host *host;
6198 size_t sz;
6199 int i;
6200 void *dr;
6201
6202 DPRINTK("ENTER\n");
6203
6204 /* alloc a container for our list of ATA ports (buses) */
6205 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6206 host = kzalloc(sz, GFP_KERNEL);
6207 if (!host)
6208 return NULL;
6209
6210 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6211 goto err_free;
6212
6213 dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
6214 if (!dr)
6215 goto err_out;
6216
6217 devres_add(dev, dr);
6218 dev_set_drvdata(dev, host);
6219
6220 spin_lock_init(&host->lock);
6221 mutex_init(&host->eh_mutex);
6222 host->dev = dev;
6223 host->n_ports = max_ports;
6224 kref_init(&host->kref);
6225
6226 /* allocate ports bound to this host */
6227 for (i = 0; i < max_ports; i++) {
6228 struct ata_port *ap;
6229
6230 ap = ata_port_alloc(host);
6231 if (!ap)
6232 goto err_out;
6233
6234 ap->port_no = i;
6235 host->ports[i] = ap;
6236 }
6237
6238 devres_remove_group(dev, NULL);
6239 return host;
6240
6241 err_out:
6242 devres_release_group(dev, NULL);
6243 err_free:
6244 kfree(host);
6245 return NULL;
6246 }
6247 EXPORT_SYMBOL_GPL(ata_host_alloc);
6248
6249 /**
6250 * ata_host_alloc_pinfo - alloc host and init with port_info array
6251 * @dev: generic device this host is associated with
6252 * @ppi: array of ATA port_info to initialize host with
6253 * @n_ports: number of ATA ports attached to this host
6254 *
6255 * Allocate an ATA host and initialize it with info from @ppi. If
6256 * NULL-terminated, @ppi may contain fewer entries than @n_ports. The
6257 * last entry will be used for the remaining ports.
6258 *
6259 * RETURNS:
6260 * Allocated ATA host on success, NULL on failure.
6261 *
6262 * LOCKING:
6263 * Inherited from calling layer (may sleep).
6264 */
6265 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6266 const struct ata_port_info * const * ppi,
6267 int n_ports)
6268 {
6269 const struct ata_port_info *pi;
6270 struct ata_host *host;
6271 int i, j;
6272
6273 host = ata_host_alloc(dev, n_ports);
6274 if (!host)
6275 return NULL;
6276
6277 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6278 struct ata_port *ap = host->ports[i];
6279
6280 if (ppi[j])
6281 pi = ppi[j++];
6282
6283 ap->pio_mask = pi->pio_mask;
6284 ap->mwdma_mask = pi->mwdma_mask;
6285 ap->udma_mask = pi->udma_mask;
6286 ap->flags |= pi->flags;
6287 ap->link.flags |= pi->link_flags;
6288 ap->ops = pi->port_ops;
6289
6290 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6291 host->ops = pi->port_ops;
6292 }
6293
6294 return host;
6295 }
6296 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
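
/*
 * Example (editor's sketch): a typical @ppi array for a two-port
 * controller. my_port_info is hypothetical; ATA_FLAG_SATA, ATA_PIO4,
 * ATA_UDMA6, sata_port_ops and ata_dummy_port_info are real.
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &sata_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 *
 * As @ppi is NULL terminated after the first entry, both ports reuse
 * my_port_info. An entry pointing to &ata_dummy_port_info instead
 * marks that port as absent.
 */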
6297
6298 /**
6299 * ata_slave_link_init - initialize slave link
6300 * @ap: port to initialize slave link for
6301 *
6302 * Create and initialize slave link for @ap. This enables slave
6303 * link handling on the port.
6304 *
6305 * In libata, a port contains links and a link contains devices.
6306 * There is a single host link but if a PMP is attached to it,
6307 * there can be multiple fan-out links. On SATA, there's usually
6308 * a single device connected to a link but PATA and SATA
6309 * controllers emulating TF based interface can have two - master
6310 * and slave.
6311 *
6312 * However, there are a few controllers which don't fit into this
6313 * abstraction very well - SATA controllers which emulate TF
6314 * interface with both master and slave devices but also have
6315 * separate SCR register sets for each device. These controllers
6316 * need separate links for physical link handling
6317 * (e.g. onlineness, link speed) but should be treated like a
6318 * traditional M/S controller for everything else (e.g. command
6319 * issue, softreset).
6320 *
6321 * slave_link is libata's way of handling this class of
6322 * controllers without impacting the core layer too much. For
6323 * anything other than physical link handling, the default host
6324 * link is used for both master and slave. For physical link
6325 * handling, separate @ap->slave_link is used. All dirty details
6326 * are implemented inside libata core layer. From LLD's POV, the
6327 * only difference is that prereset, hardreset and postreset are
6328 * called once more for the slave link, so the reset sequence
6329 * looks like the following.
6330 *
6331 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
6332 * softreset(M) -> postreset(M) -> postreset(S)
6333 *
6334 * Note that softreset is called only for the master. Softreset
6335 * resets both M/S by definition, so SRST on master should handle
6336 * both (the standard method will work just fine).
6337 *
6338 * LOCKING:
6339 * Should be called before host is registered.
6340 *
6341 * RETURNS:
6342 * 0 on success, -errno on failure.
6343 */
6344 int ata_slave_link_init(struct ata_port *ap)
6345 {
6346 struct ata_link *link;
6347
6348 WARN_ON(ap->slave_link);
6349 WARN_ON(ap->flags & ATA_FLAG_PMP);
6350
6351 link = kzalloc(sizeof(*link), GFP_KERNEL);
6352 if (!link)
6353 return -ENOMEM;
6354
6355 ata_link_init(ap, link, 1);
6356 ap->slave_link = link;
6357 return 0;
6358 }
6359 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6360
6361 static void ata_host_stop(struct device *gendev, void *res)
6362 {
6363 struct ata_host *host = dev_get_drvdata(gendev);
6364 int i;
6365
6366 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6367
6368 for (i = 0; i < host->n_ports; i++) {
6369 struct ata_port *ap = host->ports[i];
6370
6371 if (ap->ops->port_stop)
6372 ap->ops->port_stop(ap);
6373 }
6374
6375 if (host->ops->host_stop)
6376 host->ops->host_stop(host);
6377 }
6378
6379 /**
6380 * ata_finalize_port_ops - finalize ata_port_operations
6381 * @ops: ata_port_operations to finalize
6382 *
6383 * An ata_port_operations can inherit from another ops and that
6384 * ops can again inherit from another. This can go on as many
6385 * times as necessary as long as there is no loop in the
6386 * inheritance chain.
6387 *
6388 * Ops tables are finalized when the host is started. NULL or
6389 * unspecified entries are inherited from the closest ancestor
6390 * which has the method, and the entry is populated with it.
6391 * After finalization, the ops table directly points to all the
6392 * methods and ->inherits is no longer necessary and cleared.
6393 *
6394 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
6395 *
6396 * LOCKING:
6397 * None.
6398 */
6399 static void ata_finalize_port_ops(struct ata_port_operations *ops)
6400 {
6401 static DEFINE_SPINLOCK(lock);
6402 const struct ata_port_operations *cur;
6403 void **begin = (void **)ops;
6404 void **end = (void **)&ops->inherits;
6405 void **pp;
6406
6407 if (!ops || !ops->inherits)
6408 return;
6409
6410 spin_lock(&lock);
6411
6412 for (cur = ops->inherits; cur; cur = cur->inherits) {
6413 void **inherit = (void **)cur;
6414
6415 for (pp = begin; pp < end; pp++, inherit++)
6416 if (!*pp)
6417 *pp = *inherit;
6418 }
6419
6420 for (pp = begin; pp < end; pp++)
6421 if (IS_ERR(*pp))
6422 *pp = NULL;
6423
6424 ops->inherits = NULL;
6425
6426 spin_unlock(&lock);
6427 }
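
/*
 * Example (editor's sketch): an inheriting ops table as consumed by
 * ata_finalize_port_ops(). my_ops and my_hardreset are hypothetical;
 * the .inherits chaining and ATA_OP_NULL are the real mechanism
 * described above.
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,	- override one method
 *		.softreset	= ATA_OP_NULL,	- force method to NULL
 *	};
 *
 * After finalization, every still-unset method is copied from
 * sata_port_ops (and transitively from ata_base_port_ops), .softreset
 * becomes NULL because ATA_OP_NULL is an IS_ERR() pointer, and
 * .inherits is cleared.
 */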
6428
6429 /**
6430 * ata_host_start - start and freeze ports of an ATA host
6431 * @host: ATA host to start ports for
6432 *
6433 * Start and then freeze ports of @host. Started status is
6434 * recorded in host->flags, so this function can be called
6435 * multiple times. Ports are guaranteed to get started only
6436 * once. If host->ops isn't initialized yet, it's set to the
6437 * first non-dummy port ops.
6438 *
6439 * LOCKING:
6440 * Inherited from calling layer (may sleep).
6441 *
6442 * RETURNS:
6443 * 0 if all ports are started successfully, -errno otherwise.
6444 */
6445 int ata_host_start(struct ata_host *host)
6446 {
6447 int have_stop = 0;
6448 void *start_dr = NULL;
6449 int i, rc;
6450
6451 if (host->flags & ATA_HOST_STARTED)
6452 return 0;
6453
6454 ata_finalize_port_ops(host->ops);
6455
6456 for (i = 0; i < host->n_ports; i++) {
6457 struct ata_port *ap = host->ports[i];
6458
6459 ata_finalize_port_ops(ap->ops);
6460
6461 if (!host->ops && !ata_port_is_dummy(ap))
6462 host->ops = ap->ops;
6463
6464 if (ap->ops->port_stop)
6465 have_stop = 1;
6466 }
6467
6468 if (host->ops->host_stop)
6469 have_stop = 1;
6470
6471 if (have_stop) {
6472 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
6473 if (!start_dr)
6474 return -ENOMEM;
6475 }
6476
6477 for (i = 0; i < host->n_ports; i++) {
6478 struct ata_port *ap = host->ports[i];
6479
6480 if (ap->ops->port_start) {
6481 rc = ap->ops->port_start(ap);
6482 if (rc) {
6483 if (rc != -ENODEV)
6484 dev_err(host->dev,
6485 "failed to start port %d (errno=%d)\n",
6486 i, rc);
6487 goto err_out;
6488 }
6489 }
6490 ata_eh_freeze_port(ap);
6491 }
6492
6493 if (start_dr)
6494 devres_add(host->dev, start_dr);
6495 host->flags |= ATA_HOST_STARTED;
6496 return 0;
6497
6498 err_out:
6499 while (--i >= 0) {
6500 struct ata_port *ap = host->ports[i];
6501
6502 if (ap->ops->port_stop)
6503 ap->ops->port_stop(ap);
6504 }
6505 devres_free(start_dr);
6506 return rc;
6507 }
6508 EXPORT_SYMBOL_GPL(ata_host_start);
6509
6510 /**
6511 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
6512 * @host: host to initialize
6513 * @dev: device host is attached to
6514 * @ops: port_ops
6515 *
6516 */
6517 void ata_host_init(struct ata_host *host, struct device *dev,
6518 struct ata_port_operations *ops)
6519 {
6520 spin_lock_init(&host->lock);
6521 mutex_init(&host->eh_mutex);
6522 host->n_tags = ATA_MAX_QUEUE;
6523 host->dev = dev;
6524 host->ops = ops;
6525 kref_init(&host->kref);
6526 }
6527 EXPORT_SYMBOL_GPL(ata_host_init);
6528
6529 void __ata_port_probe(struct ata_port *ap)
6530 {
6531 struct ata_eh_info *ehi = &ap->link.eh_info;
6532 unsigned long flags;
6533
6534 /* kick EH for boot probing */
6535 spin_lock_irqsave(ap->lock, flags);
6536
6537 ehi->probe_mask |= ATA_ALL_DEVICES;
6538 ehi->action |= ATA_EH_RESET;
6539 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6540
6541 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6542 ap->pflags |= ATA_PFLAG_LOADING;
6543 ata_port_schedule_eh(ap);
6544
6545 spin_unlock_irqrestore(ap->lock, flags);
6546 }
6547
6548 int ata_port_probe(struct ata_port *ap)
6549 {
6550 int rc = 0;
6551
6552 if (ap->ops->error_handler) {
6553 __ata_port_probe(ap);
6554 ata_port_wait_eh(ap);
6555 } else {
6556 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6557 rc = ata_bus_probe(ap);
6558 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6559 }
6560 return rc;
6561 }
6562
6564 static void async_port_probe(void *data, async_cookie_t cookie)
6565 {
6566 struct ata_port *ap = data;
6567
6568 /*
6569 * If we're not allowed to scan this host in parallel,
6570 * we need to wait until all previous scans have completed
6571 * before going further.
6572 * Jeff Garzik says this is only within a controller, so we
6573 * don't need to wait for port 0, only for later ports.
6574 */
6575 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6576 async_synchronize_cookie(cookie);
6577
6578 (void)ata_port_probe(ap);
6579
6580 /* in order to keep device order, we need to synchronize at this point */
6581 async_synchronize_cookie(cookie);
6582
6583 ata_scsi_scan_host(ap, 1);
6584 }
6585
6586 /**
6587 * ata_host_register - register initialized ATA host
6588 * @host: ATA host to register
6589 * @sht: template for SCSI host
6590 *
6591 * Register initialized ATA host. @host is allocated using
6592 * ata_host_alloc() and fully initialized by the LLD. This function
6593 * starts ports, registers @host with the ATA and SCSI layers, and
6594 * probes registered devices.
6595 *
6596 * LOCKING:
6597 * Inherited from calling layer (may sleep).
6598 *
6599 * RETURNS:
6600 * 0 on success, -errno otherwise.
6601 */
6602 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6603 {
6604 int i, rc;
6605
6606 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
6607
6608 /* host must have been started */
6609 if (!(host->flags & ATA_HOST_STARTED)) {
6610 dev_err(host->dev, "BUG: trying to register unstarted host\n");
6611 WARN_ON(1);
6612 return -EINVAL;
6613 }
6614
6615 /* Blow away unused ports. This happens when the LLD can't
6616 * determine the exact number of ports to allocate at
6617 * allocation time.
6618 */
6619 for (i = host->n_ports; host->ports[i]; i++)
6620 kfree(host->ports[i]);
6621
6622 /* give ports names and add SCSI hosts */
6623 for (i = 0; i < host->n_ports; i++) {
6624 host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
6625 host->ports[i]->local_port_no = i + 1;
6626 }
6627
6628 /* Create associated sysfs transport objects */
6629 for (i = 0; i < host->n_ports; i++) {
6630 rc = ata_tport_add(host->dev, host->ports[i]);
6631 if (rc)
6632 goto err_tadd;
6634 }
6635
6636 rc = ata_scsi_add_hosts(host, sht);
6637 if (rc)
6638 goto err_tadd;
6639
6640 /* set cable, sata_spd_limit and report */
6641 for (i = 0; i < host->n_ports; i++) {
6642 struct ata_port *ap = host->ports[i];
6643 unsigned long xfer_mask;
6644
6645 /* set SATA cable type if still unset */
6646 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6647 ap->cbl = ATA_CBL_SATA;
6648
6649 /* init sata_spd_limit to the current value */
6650 sata_link_init_spd(&ap->link);
6651 if (ap->slave_link)
6652 sata_link_init_spd(ap->slave_link);
6653
6654 /* print per-port info to dmesg */
6655 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6656 ap->udma_mask);
6657
6658 if (!ata_port_is_dummy(ap)) {
6659 ata_port_info(ap, "%cATA max %s %s\n",
6660 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6661 ata_mode_string(xfer_mask),
6662 ap->link.eh_info.desc);
6663 ata_ehi_clear_desc(&ap->link.eh_info);
6664 } else {
6665 ata_port_info(ap, "DUMMY\n");
6666 }
6666 }
6667
6668 /* perform each probe asynchronously */
6669 for (i = 0; i < host->n_ports; i++) {
6670 struct ata_port *ap = host->ports[i];
6671 async_schedule(async_port_probe, ap);
6672 }
6673
6674 return 0;
6675
6676 err_tadd:
6677 while (--i >= 0)
6678 ata_tport_delete(host->ports[i]);
6679 return rc;
6680 }
6683 EXPORT_SYMBOL_GPL(ata_host_register);
6684
6685 /**
6686 * ata_host_activate - start host, request IRQ and register it
6687 * @host: target ATA host
6688 * @irq: IRQ to request
6689 * @irq_handler: irq_handler used when requesting IRQ
6690 * @irq_flags: irq_flags used when requesting IRQ
6691 * @sht: scsi_host_template to use when registering the host
6692 *
6693 * After allocating an ATA host and initializing it, most libata
6694 * LLDs perform three steps to activate the host - start host,
6695 * request IRQ and register it. This helper takes the necessary
6696 * arguments and performs the three steps in one go.
6697 *
6698 * An invalid IRQ skips the IRQ registration and expects the host to
6699 * have set polling mode on the port. In this case, @irq_handler
6700 * should be NULL.
6701 *
6702 * LOCKING:
6703 * Inherited from calling layer (may sleep).
6704 *
6705 * RETURNS:
6706 * 0 on success, -errno otherwise.
6707 */
6708 int ata_host_activate(struct ata_host *host, int irq,
6709 irq_handler_t irq_handler, unsigned long irq_flags,
6710 struct scsi_host_template *sht)
6711 {
6712 int i, rc;
6713 char *irq_desc;
6714
6715 rc = ata_host_start(host);
6716 if (rc)
6717 return rc;
6718
6719 /* Special case for polling mode */
6720 if (!irq) {
6721 WARN_ON(irq_handler);
6722 return ata_host_register(host, sht);
6723 }
6724
6725 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6726 dev_driver_string(host->dev),
6727 dev_name(host->dev));
6728 if (!irq_desc)
6729 return -ENOMEM;
6730
6731 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6732 irq_desc, host);
6733 if (rc)
6734 return rc;
6735
6736 for (i = 0; i < host->n_ports; i++)
6737 ata_port_desc(host->ports[i], "irq %d", irq);
6738
6739 rc = ata_host_register(host, sht);
6740 /* if failed, just free the IRQ and leave ports alone */
6741 if (rc)
6742 devm_free_irq(host->dev, irq, host);
6743
6744 return rc;
6745 }
6746 EXPORT_SYMBOL_GPL(ata_host_activate);
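
/*
 * Example (editor's sketch): the usual LLD probe sequence built from
 * the helpers above; everything prefixed my_ is hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *		struct ata_host *host;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		(map MMIO and fill host->ports[0]->ioaddr here)
 *
 *		return ata_host_activate(host, my_irq, my_interrupt,
 *					 IRQF_SHARED, &my_sht);
 *	}
 *
 * ata_host_activate() then performs ata_host_start(), the IRQ request
 * and ata_host_register() in one go; passing irq == 0 with a NULL
 * handler selects polling mode as noted above.
 */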
6747
6748 /**
6749 * ata_port_detach - Detach ATA port in preparation of device removal
6750 * @ap: ATA port to be detached
6751 *
6752 * Detach all ATA devices and the associated SCSI devices of @ap;
6753 * then, remove the associated SCSI host. @ap is guaranteed to
6754 * be quiescent on return from this function.
6755 *
6756 * LOCKING:
6757 * Kernel thread context (may sleep).
6758 */
6759 static void ata_port_detach(struct ata_port *ap)
6760 {
6761 unsigned long flags;
6762 struct ata_link *link;
6763 struct ata_device *dev;
6764
6765 if (!ap->ops->error_handler)
6766 goto skip_eh;
6767
6768 /* tell EH we're leaving & flush EH */
6769 spin_lock_irqsave(ap->lock, flags);
6770 ap->pflags |= ATA_PFLAG_UNLOADING;
6771 ata_port_schedule_eh(ap);
6772 spin_unlock_irqrestore(ap->lock, flags);
6773
6774 /* wait till EH commits suicide */
6775 ata_port_wait_eh(ap);
6776
6777 /* it better be dead now */
6778 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6779
6780 cancel_delayed_work_sync(&ap->hotplug_task);
6781
6782 skip_eh:
6783 /* clean up zpodd on port removal */
6784 ata_for_each_link(link, ap, HOST_FIRST) {
6785 ata_for_each_dev(dev, link, ALL) {
6786 if (zpodd_dev_enabled(dev))
6787 zpodd_exit(dev);
6788 }
6789 }
6790 if (ap->pmp_link) {
6791 int i;
6792 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6793 ata_tlink_delete(&ap->pmp_link[i]);
6794 }
6795 /* remove the associated SCSI host */
6796 scsi_remove_host(ap->scsi_host);
6797 ata_tport_delete(ap);
6798 }
6799
6800 /**
6801 * ata_host_detach - Detach all ports of an ATA host
6802 * @host: Host to detach
6803 *
6804 * Detach all ports of @host.
6805 *
6806 * LOCKING:
6807 * Kernel thread context (may sleep).
6808 */
6809 void ata_host_detach(struct ata_host *host)
6810 {
6811 int i;
6812
6813 /* Ensure ata_port probe has completed */
6814 async_synchronize_full();
6815
6816 for (i = 0; i < host->n_ports; i++)
6817 ata_port_detach(host->ports[i]);
6818
6819 /* the host is dead now, dissociate ACPI */
6820 ata_acpi_dissociate(host);
6821 }
6822 EXPORT_SYMBOL_GPL(ata_host_detach);
6823
6824 #ifdef CONFIG_PCI
6825
6826 /**
6827 * ata_pci_remove_one - PCI layer callback for device removal
6828 * @pdev: PCI device that was removed
6829 *
6830 * The PCI layer indicates to libata via this hook that a hot-unplug or
6831 * module unload event has occurred. Detach all ports. Resource
6832 * release is handled via devres.
6833 *
6834 * LOCKING:
6835 * Inherited from PCI layer (may sleep).
6836 */
6837 void ata_pci_remove_one(struct pci_dev *pdev)
6838 {
6839 struct ata_host *host = pci_get_drvdata(pdev);
6840
6841 ata_host_detach(host);
6842 }
6843 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6844
6845 void ata_pci_shutdown_one(struct pci_dev *pdev)
6846 {
6847 struct ata_host *host = pci_get_drvdata(pdev);
6848 int i;
6849
6850 for (i = 0; i < host->n_ports; i++) {
6851 struct ata_port *ap = host->ports[i];
6852
6853 ap->pflags |= ATA_PFLAG_FROZEN;
6854
6855 /* Disable port interrupts */
6856 if (ap->ops->freeze)
6857 ap->ops->freeze(ap);
6858
6859 /* Stop the port DMA engines */
6860 if (ap->ops->port_stop)
6861 ap->ops->port_stop(ap);
6862 }
6863 }
6864 EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
6865
6866 /* move to PCI subsystem */
6867 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6868 {
6869 unsigned long tmp = 0;
6870
6871 switch (bits->width) {
6872 case 1: {
6873 u8 tmp8 = 0;
6874 pci_read_config_byte(pdev, bits->reg, &tmp8);
6875 tmp = tmp8;
6876 break;
6877 }
6878 case 2: {
6879 u16 tmp16 = 0;
6880 pci_read_config_word(pdev, bits->reg, &tmp16);
6881 tmp = tmp16;
6882 break;
6883 }
6884 case 4: {
6885 u32 tmp32 = 0;
6886 pci_read_config_dword(pdev, bits->reg, &tmp32);
6887 tmp = tmp32;
6888 break;
6889 }
6890
6891 default:
6892 return -EINVAL;
6893 }
6894
6895 tmp &= bits->mask;
6896
6897 return (tmp == bits->val) ? 1 : 0;
6898 }
6899 EXPORT_SYMBOL_GPL(pci_test_config_bits);
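
/*
 * Example (editor's sketch): struct pci_bits is { reg, width, mask,
 * val }. A PATA driver checking a hypothetical port-enable bit in PCI
 * config space could do:
 *
 *	static const struct pci_bits my_enable_bits = {
 *		0x41, 1, 0x80, 0x80	- reg 0x41, 1 byte, bit 7 set
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;		- port disabled by firmware
 *
 * The function returns 1 when (reg & mask) == val, 0 when it differs
 * and -EINVAL for an unsupported width.
 */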
6900
6901 #ifdef CONFIG_PM
6902 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6903 {
6904 pci_save_state(pdev);
6905 pci_disable_device(pdev);
6906
6907 if (mesg.event & PM_EVENT_SLEEP)
6908 pci_set_power_state(pdev, PCI_D3hot);
6909 }
6910 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6911
6912 int ata_pci_device_do_resume(struct pci_dev *pdev)
6913 {
6914 int rc;
6915
6916 pci_set_power_state(pdev, PCI_D0);
6917 pci_restore_state(pdev);
6918
6919 rc = pcim_enable_device(pdev);
6920 if (rc) {
6921 dev_err(&pdev->dev,
6922 "failed to enable device after resume (%d)\n", rc);
6923 return rc;
6924 }
6925
6926 pci_set_master(pdev);
6927 return 0;
6928 }
6929 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6930
6931 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6932 {
6933 struct ata_host *host = pci_get_drvdata(pdev);
6934 int rc = 0;
6935
6936 rc = ata_host_suspend(host, mesg);
6937 if (rc)
6938 return rc;
6939
6940 ata_pci_device_do_suspend(pdev, mesg);
6941
6942 return 0;
6943 }
6944 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6945
6946 int ata_pci_device_resume(struct pci_dev *pdev)
6947 {
6948 struct ata_host *host = pci_get_drvdata(pdev);
6949 int rc;
6950
6951 rc = ata_pci_device_do_resume(pdev);
6952 if (rc == 0)
6953 ata_host_resume(host);
6954 return rc;
6955 }
6956 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6957 #endif /* CONFIG_PM */
6958 #endif /* CONFIG_PCI */
6959
6960 /**
6961 * ata_platform_remove_one - Platform layer callback for device removal
6962 * @pdev: Platform device that was removed
6963 *
6964 * The platform layer indicates to libata via this hook that a hot-unplug
6965 * or module unload event has occurred. Detach all ports. Resource
6966 * release is handled via devres.
6967 *
6968 * LOCKING:
6969 * Inherited from platform layer (may sleep).
6970 */
6971 int ata_platform_remove_one(struct platform_device *pdev)
6972 {
6973 struct ata_host *host = platform_get_drvdata(pdev);
6974
6975 ata_host_detach(host);
6976
6977 return 0;
6978 }
6979 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6980
6981 static int __init ata_parse_force_one(char **cur,
6982 struct ata_force_ent *force_ent,
6983 const char **reason)
6984 {
6985 static const struct ata_force_param force_tbl[] __initconst = {
6986 { "40c", .cbl = ATA_CBL_PATA40 },
6987 { "80c", .cbl = ATA_CBL_PATA80 },
6988 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6989 { "unk", .cbl = ATA_CBL_PATA_UNK },
6990 { "ign", .cbl = ATA_CBL_PATA_IGN },
6991 { "sata", .cbl = ATA_CBL_SATA },
6992 { "1.5Gbps", .spd_limit = 1 },
6993 { "3.0Gbps", .spd_limit = 2 },
6994 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6995 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6996 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
6997 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
6998 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6999 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
7000 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
7001 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
7002 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
7003 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
7004 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
7005 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
7006 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
7007 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
7008 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
7009 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
7010 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
7011 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7012 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7013 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7014 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7015 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7016 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7017 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7018 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7019 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7020 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7021 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7022 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7023 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7024 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7025 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7026 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7027 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7028 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7029 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7030 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7031 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7032 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
7033 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
7034 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
7035 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
7036 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
7037 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
7038 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
7039 };
7040 char *start = *cur, *p = *cur;
7041 char *id, *val, *endp;
7042 const struct ata_force_param *match_fp = NULL;
7043 int nr_matches = 0, i;
7044
7045 /* find where this param ends and update *cur */
7046 while (*p != '\0' && *p != ',')
7047 p++;
7048
7049 if (*p == '\0')
7050 *cur = p;
7051 else
7052 *cur = p + 1;
7053
7054 *p = '\0';
7055
7056 /* parse */
7057 p = strchr(start, ':');
7058 if (!p) {
7059 val = strstrip(start);
7060 goto parse_val;
7061 }
7062 *p = '\0';
7063
7064 id = strstrip(start);
7065 val = strstrip(p + 1);
7066
7067 /* parse id */
7068 p = strchr(id, '.');
7069 if (p) {
7070 *p++ = '\0';
7071 force_ent->device = simple_strtoul(p, &endp, 10);
7072 if (p == endp || *endp != '\0') {
7073 *reason = "invalid device";
7074 return -EINVAL;
7075 }
7076 }
7077
7078 force_ent->port = simple_strtoul(id, &endp, 10);
7079 if (id == endp || *endp != '\0') {
7080 *reason = "invalid port/link";
7081 return -EINVAL;
7082 }
7083
7084 parse_val:
7085 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
7086 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
7087 const struct ata_force_param *fp = &force_tbl[i];
7088
7089 if (strncasecmp(val, fp->name, strlen(val)))
7090 continue;
7091
7092 nr_matches++;
7093 match_fp = fp;
7094
7095 if (strcasecmp(val, fp->name) == 0) {
7096 nr_matches = 1;
7097 break;
7098 }
7099 }
7100
7101 if (!nr_matches) {
7102 *reason = "unknown value";
7103 return -EINVAL;
7104 }
7105 if (nr_matches > 1) {
7106 *reason = "ambiguous value";
7107 return -EINVAL;
7108 }
7109
7110 force_ent->param = *match_fp;
7111
7112 return 0;
7113 }
7114
7115 static void __init ata_parse_force_param(void)
7116 {
7117 int idx = 0, size = 1;
7118 int last_port = -1, last_device = -1;
7119 char *p, *cur, *next;
7120
7121 /* calculate maximum number of params and allocate force_tbl */
7122 for (p = ata_force_param_buf; *p; p++)
7123 if (*p == ',')
7124 size++;
7125
7126 ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
7127 if (!ata_force_tbl) {
7128 printk(KERN_WARNING "ata: failed to extend force table, "
7129 "libata.force ignored\n");
7130 return;
7131 }
7132
7133 /* parse and populate the table */
7134 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7135 const char *reason = "";
7136 struct ata_force_ent te = { .port = -1, .device = -1 };
7137
7138 next = cur;
7139 if (ata_parse_force_one(&next, &te, &reason)) {
7140 printk(KERN_WARNING "ata: failed to parse force "
7141 "parameter \"%s\" (%s)\n",
7142 cur, reason);
7143 continue;
7144 }
7145
7146 if (te.port == -1) {
7147 te.port = last_port;
7148 te.device = last_device;
7149 }
7150
7151 ata_force_tbl[idx++] = te;
7152
7153 last_port = te.port;
7154 last_device = te.device;
7155 }
7156
7157 ata_force_tbl_size = idx;
7158 }
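
/*
 * Editor's note: per the parser above, libata.force takes a comma
 * separated list of "[PORT[.DEVICE]:]VALUE" entries, with the value
 * names coming from force_tbl[]. For example:
 *
 *	libata.force=3.0Gbps		- apply to all ports
 *	libata.force=1:noncq,2:1.5Gbps	- per-port entries
 *
 * An entry without an ID reuses the PORT/DEVICE of the previous entry,
 * and abbreviations are accepted when unambiguous (both "1.5" and
 * "1.5Gbps" work).
 */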
7159
7160 static int __init ata_init(void)
7161 {
7162 int rc;
7163
7164 ata_parse_force_param();
7165
7166 rc = ata_sff_init();
7167 if (rc) {
7168 kfree(ata_force_tbl);
7169 return rc;
7170 }
7171
7172 libata_transport_init();
7173 ata_scsi_transport_template = ata_attach_transport();
7174 if (!ata_scsi_transport_template) {
7175 ata_sff_exit();
7176 rc = -ENOMEM;
7177 goto err_out;
7178 }
7179
7180 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7181 return 0;
7182
7183 err_out:
7184 return rc;
7185 }
7186
7187 static void __exit ata_exit(void)
7188 {
7189 ata_release_transport(ata_scsi_transport_template);
7190 libata_transport_exit();
7191 ata_sff_exit();
7192 kfree(ata_force_tbl);
7193 }
7194
7195 subsys_initcall(ata_init);
7196 module_exit(ata_exit);
7197
7198 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
7199
7200 int ata_ratelimit(void)
7201 {
7202 return __ratelimit(&ratelimit);
7203 }
7204 EXPORT_SYMBOL_GPL(ata_ratelimit);
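
/*
 * Example (editor's sketch): ata_ratelimit() allows at most one
 * message per HZ/5 jiffies (200 ms), e.g. to keep an interrupt path
 * from flooding the log. MY_ERR_BIT is hypothetical; ata_port_warn()
 * is the real wrapper around ata_port_printk() below.
 *
 *	if (unlikely(status & MY_ERR_BIT) && ata_ratelimit())
 *		ata_port_warn(ap, "spurious error, status 0x%x\n", status);
 */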
7205
7206 /**
7207 * ata_msleep - ATA EH owner aware msleep
7208 * @ap: ATA port to attribute the sleep to
7209 * @msecs: duration to sleep in milliseconds
7210 *
7211 * Sleeps for @msecs. If the current task is the owner of @ap's EH, the
7212 * ownership is released before going to sleep and reacquired
7213 * after the sleep is complete. IOW, other ports sharing the
7214 * @ap->host will be allowed to own the EH while this task is
7215 * sleeping.
7216 *
7217 * LOCKING:
7218 * Might sleep.
7219 */
7220 void ata_msleep(struct ata_port *ap, unsigned int msecs)
7221 {
7222 bool owns_eh = ap && ap->host->eh_owner == current;
7223
7224 if (owns_eh)
7225 ata_eh_release(ap);
7226
7227 if (msecs < 20) {
7228 unsigned long usecs = msecs * USEC_PER_MSEC;
7229 usleep_range(usecs, usecs + 50);
7230 } else {
7231 msleep(msecs);
7232 }
7233
7234 if (owns_eh)
7235 ata_eh_acquire(ap);
7236 }
7237 EXPORT_SYMBOL_GPL(ata_msleep);
7238
7239 /**
7240 * ata_wait_register - wait until register value changes
7241 * @ap: ATA port to wait register for, can be NULL
7242 * @reg: IO-mapped register
7243 * @mask: Mask to apply to read register value
7244 * @val: Wait condition
7245 * @interval: polling interval in milliseconds
7246 * @timeout: timeout in milliseconds
7247 *
7248 * Waiting for some bits of a register to change is a common
7249 * operation for ATA controllers. This function reads the 32-bit LE
7250 * IO-mapped register @reg and tests for the following condition.
7251 *
7252 * (*@reg & @mask) != @val
7253 *
7254 * If the condition is met, it returns; otherwise, the process is
7255 * repeated after @interval msecs until @timeout msecs pass.
7256 *
7257 * LOCKING:
7258 * Kernel thread context (may sleep)
7259 *
7260 * RETURNS:
7261 * The final register value.
7262 */
7263 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
7264 unsigned long interval, unsigned long timeout)
7265 {
7266 unsigned long deadline;
7267 u32 tmp;
7268
7269 tmp = ioread32(reg);
7270
7271 /* Calculate timeout _after_ the first read to make sure
7272 * preceding writes reach the controller before starting to
7273 * eat away the timeout.
7274 */
7275 deadline = ata_deadline(jiffies, timeout);
7276
7277 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
7278 ata_msleep(ap, interval);
7279 tmp = ioread32(reg);
7280 }
7281
7282 return tmp;
7283 }
7284 EXPORT_SYMBOL_GPL(ata_wait_register);
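
/*
 * Example (editor's sketch): waiting for a hypothetical BUSY bit to
 * clear. The loop runs while (read & @mask) == @val, so pass the bit
 * as both to wait for it to drop:
 *
 *	status = ata_wait_register(ap, mmio + MY_STATUS, MY_BUSY,
 *				   MY_BUSY, 10, 1000);
 *	if (status & MY_BUSY)
 *		return -EBUSY;		- timed out, still busy
 *
 * MY_STATUS and MY_BUSY are hypothetical; the poll interval here is
 * 10 ms with a 1000 ms timeout.
 */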
7285
7286 /**
7287 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
7288 * @link: Link receiving the event
7289 *
7290 * Test whether the received PHY event has to be ignored or not.
7291 *
7292 * LOCKING:
7293 * None.
7294 *
7295 * RETURNS:
7296 * True if the event has to be ignored.
7297 */
7298 bool sata_lpm_ignore_phy_events(struct ata_link *link)
7299 {
7300 unsigned long lpm_timeout = link->last_lpm_change +
7301 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
7302
7303 /* if LPM is enabled, PHYRDY doesn't mean anything */
7304 if (link->lpm_policy > ATA_LPM_MAX_POWER)
7305 return true;
7306
7307 /* ignore the first PHY event after the LPM policy changed
7308 * as it might be spurious
7309 */
7310 if ((link->flags & ATA_LFLAG_CHANGED) &&
7311 time_before(jiffies, lpm_timeout))
7312 return true;
7313
7314 return false;
7315 }
7316 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
7317
7318 /*
7319 * Dummy port_ops
7320 */
7321 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7322 {
7323 return AC_ERR_SYSTEM;
7324 }
7325
7326 static void ata_dummy_error_handler(struct ata_port *ap)
7327 {
7328 /* truly dummy */
7329 }
7330
7331 struct ata_port_operations ata_dummy_port_ops = {
7332 .qc_prep = ata_noop_qc_prep,
7333 .qc_issue = ata_dummy_qc_issue,
7334 .error_handler = ata_dummy_error_handler,
7335 .sched_eh = ata_std_sched_eh,
7336 .end_eh = ata_std_end_eh,
7337 };
7338 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7339
7340 const struct ata_port_info ata_dummy_port_info = {
7341 .port_ops = &ata_dummy_port_ops,
7342 };
7343 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7344
7345 /*
7346 * Utility print functions
7347 */
7348 void ata_port_printk(const struct ata_port *ap, const char *level,
7349 const char *fmt, ...)
7350 {
7351 struct va_format vaf;
7352 va_list args;
7353
7354 va_start(args, fmt);
7355
7356 vaf.fmt = fmt;
7357 vaf.va = &args;
7358
7359 printk("%sata%u: %pV", level, ap->print_id, &vaf);
7360
7361 va_end(args);
7362 }
7363 EXPORT_SYMBOL(ata_port_printk);
7364
7365 void ata_link_printk(const struct ata_link *link, const char *level,
7366 const char *fmt, ...)
7367 {
7368 struct va_format vaf;
7369 va_list args;
7370
7371 va_start(args, fmt);
7372
7373 vaf.fmt = fmt;
7374 vaf.va = &args;
7375
7376 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
7377 printk("%sata%u.%02u: %pV",
7378 level, link->ap->print_id, link->pmp, &vaf);
7379 else
7380 printk("%sata%u: %pV",
7381 level, link->ap->print_id, &vaf);
7382
7383 va_end(args);
7384 }
7385 EXPORT_SYMBOL(ata_link_printk);
7386
7387 void ata_dev_printk(const struct ata_device *dev, const char *level,
7388 const char *fmt, ...)
7389 {
7390 struct va_format vaf;
7391 va_list args;
7392
7393 va_start(args, fmt);
7394
7395 vaf.fmt = fmt;
7396 vaf.va = &args;
7397
7398 printk("%sata%u.%02u: %pV",
7399 level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
7400 &vaf);
7401
7402 va_end(args);
7403 }
7404 EXPORT_SYMBOL(ata_dev_printk);
7405
7406 void ata_print_version(const struct device *dev, const char *version)
7407 {
7408 dev_printk(KERN_DEBUG, dev, "version %s\n", version);
7409 }
7410 EXPORT_SYMBOL(ata_print_version);