include/linux/libata.h (blob as of commit "libata: Fix ata_busy_wait() kernel docs")
1 /*
2 * Copyright 2003-2005 Red Hat, Inc. All rights reserved.
3 * Copyright 2003-2005 Jeff Garzik
4 *
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 *
21 * libata documentation is available via 'make {ps|pdf}docs',
22 * as Documentation/DocBook/libata.*
23 *
24 */
25
26 #ifndef __LINUX_LIBATA_H__
27 #define __LINUX_LIBATA_H__
28
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/scatterlist.h>
34 #include <asm/io.h>
35 #include <linux/ata.h>
36 #include <linux/workqueue.h>
37 #include <scsi/scsi_host.h>
38
39 /*
40 * Define if arch has non-standard setup. This is a _PCI_ standard
41 * not a legacy or ISA standard.
42 */
43 #ifdef CONFIG_ATA_NONSTANDARD
44 #include <asm/libata-portmap.h>
45 #else
46 #include <asm-generic/libata-portmap.h>
47 #endif
48
49 /*
50 * compile-time options: to be removed as soon as all the drivers are
51 * converted to the new debugging mechanism
52 */
53 #undef ATA_DEBUG /* debugging output */
54 #undef ATA_VERBOSE_DEBUG /* yet more debugging output */
55 #undef ATA_IRQ_TRAP /* define to ack screaming irqs */
56 #undef ATA_NDEBUG /* define to disable quick runtime checks */
57 #define ATA_ENABLE_PATA /* define to enable PATA support in some
58 * low-level drivers */
59
60
61 /* note: prints function name for you */
62 #ifdef ATA_DEBUG
63 #define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
64 #ifdef ATA_VERBOSE_DEBUG
65 #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
66 #else
67 #define VPRINTK(fmt, args...)
68 #endif /* ATA_VERBOSE_DEBUG */
69 #else
70 #define DPRINTK(fmt, args...)
71 #define VPRINTK(fmt, args...)
72 #endif /* ATA_DEBUG */
73
74 #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
75
76 /* NEW: debug levels */
77 #define HAVE_LIBATA_MSG 1
78
79 enum {
80 ATA_MSG_DRV = 0x0001,
81 ATA_MSG_INFO = 0x0002,
82 ATA_MSG_PROBE = 0x0004,
83 ATA_MSG_WARN = 0x0008,
84 ATA_MSG_MALLOC = 0x0010,
85 ATA_MSG_CTL = 0x0020,
86 ATA_MSG_INTR = 0x0040,
87 ATA_MSG_ERR = 0x0080,
88 };
89
90 #define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
91 #define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
92 #define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
93 #define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
94 #define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
95 #define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
96 #define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
97 #define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
98
99 static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
100 {
101 if (dval < 0 || dval >= (sizeof(u32) * 8))
102 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
103 if (!dval)
104 return 0;
105 return (1 << dval) - 1;
106 }
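/*
 * Illustrative sketch (not part of the original header): a hypothetical LLD
 * module parameter "msg_enable" can be turned into ap->msg_enable via
 * ata_msg_init(), and the ata_msg_*() predicates above then gate optional
 * printks.
 *
 *	ap->msg_enable = ata_msg_init(msg_enable, ATA_MSG_DRV | ATA_MSG_ERR);
 *
 *	if (ata_msg_probe(ap))
 *		ata_port_printk(ap, KERN_DEBUG, "starting device probe\n");
 */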
107
108 /* defines only for the constants which don't work well as enums */
109 #define ATA_TAG_POISON 0xfafbfcfdU
110
111 /* move to PCI layer? */
112 #define PCI_VDEVICE(vendor, device) \
113 PCI_VENDOR_ID_##vendor, (device), \
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0
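/*
 * Illustrative usage (not part of the original header): PCI_VDEVICE()
 * shortens pci_device_id tables in LLDs.  The device ID below is a
 * placeholder; the trailing field is driver_data.
 *
 *	static const struct pci_device_id my_pci_tbl[] = {
 *		{ PCI_VDEVICE(INTEL, 0x27c1), 0 },
 *		{ }	// terminate list
 *	};
 */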
115
116 static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
117 {
118 return &pdev->dev;
119 }
120
121 enum {
122 /* various global constants */
123 LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
124 ATA_MAX_PORTS = 8,
125 ATA_DEF_QUEUE = 1,
126 /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
127 ATA_MAX_QUEUE = 32,
128 ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
129 ATA_MAX_BUS = 2,
130 ATA_DEF_BUSY_WAIT = 10000,
131 ATA_SHORT_PAUSE = (HZ >> 6) + 1,
132
133 ATA_SHT_EMULATED = 1,
134 ATA_SHT_CMD_PER_LUN = 1,
135 ATA_SHT_THIS_ID = -1,
136 ATA_SHT_USE_CLUSTERING = 1,
137
138 /* struct ata_device stuff */
139 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
140 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
141 ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */
142 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */
143 ATA_DFLAG_FLUSH_EXT = (1 << 4), /* do FLUSH_EXT instead of FLUSH */
144 ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
145
146 ATA_DFLAG_PIO = (1 << 8), /* device limited to PIO mode */
147 ATA_DFLAG_NCQ_OFF = (1 << 9), /* device limited to non-NCQ mode */
148 ATA_DFLAG_SUSPENDED = (1 << 10), /* device suspended */
149 ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
150
151 ATA_DFLAG_DETACH = (1 << 16),
152 ATA_DFLAG_DETACHED = (1 << 17),
153
154 ATA_DEV_UNKNOWN = 0, /* unknown device */
155 ATA_DEV_ATA = 1, /* ATA device */
156 ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */
157 ATA_DEV_ATAPI = 3, /* ATAPI device */
158 ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */
159 ATA_DEV_NONE = 5, /* no device */
160
161 /* struct ata_port flags */
162 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
163 /* (doesn't imply presence) */
164 ATA_FLAG_SATA = (1 << 1),
165 ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */
166 ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */
167 ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
168 ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */
169 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
170 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
171 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
172 ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD
173 * doesn't handle PIO interrupts */
174 ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */
175 ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */
176 ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H
177 * Register FIS clearing BSY */
178 ATA_FLAG_DEBUGMSG = (1 << 13),
179 ATA_FLAG_SETXFER_POLLING= (1 << 14), /* use polling for SETXFER */
180 ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */
181
182 /* The following flag belongs to ap->pflags but is kept in
183 * ap->flags because it's referenced in many LLDs and will be
184 * removed in the not-too-distant future.
185 */
186 ATA_FLAG_DISABLED = (1 << 23), /* port is disabled, ignore it */
187
188 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
189
190 /* struct ata_port pflags */
191 ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
192 ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
193 ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
194 ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
195 ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
196 ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */
197 ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
198
199 ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
200 ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
201 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
202
203 /* struct ata_queued_cmd flags */
204 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */
205 ATA_QCFLAG_SG = (1 << 1), /* have s/g table? */
206 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */
207 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
208 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
209 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
210
211 ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
212 ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
213 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
214
215 /* host set flags */
216 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */
217
218 /* various lengths of time */
219 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
220 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
221 ATA_TMOUT_INTERNAL = 30 * HZ,
222 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
223
224 /* ATA bus states */
225 BUS_UNKNOWN = 0,
226 BUS_DMA = 1,
227 BUS_IDLE = 2,
228 BUS_NOINTR = 3,
229 BUS_NODATA = 4,
230 BUS_TIMER = 5,
231 BUS_PIO = 6,
232 BUS_EDD = 7,
233 BUS_IDENTIFY = 8,
234 BUS_PACKET = 9,
235
236 /* SATA port states */
237 PORT_UNKNOWN = 0,
238 PORT_ENABLED = 1,
239 PORT_DISABLED = 2,
240
241 /* encoding various smaller bitmaps into a single
242 * unsigned int bitmap; see the packing sketch after this enum
243 */
244 ATA_BITS_PIO = 7,
245 ATA_BITS_MWDMA = 5,
246 ATA_BITS_UDMA = 8,
247
248 ATA_SHIFT_PIO = 0,
249 ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_BITS_PIO,
250 ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,
251
252 ATA_MASK_PIO = ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
253 ATA_MASK_MWDMA = ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
254 ATA_MASK_UDMA = ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,
255
256 /* size of buffer to pad xfers ending on unaligned boundaries */
257 ATA_DMA_PAD_SZ = 4,
258 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
259
260 /* masks for port functions */
261 ATA_PORT_PRIMARY = (1 << 0),
262 ATA_PORT_SECONDARY = (1 << 1),
263
264 /* ering size */
265 ATA_ERING_SIZE = 32,
266
267 /* desc_len for ata_eh_info and context */
268 ATA_EH_DESC_LEN = 80,
269
270 /* reset / recovery action types */
271 ATA_EH_REVALIDATE = (1 << 0),
272 ATA_EH_SOFTRESET = (1 << 1),
273 ATA_EH_HARDRESET = (1 << 2),
274 ATA_EH_SUSPEND = (1 << 3),
275 ATA_EH_RESUME = (1 << 4),
276 ATA_EH_PM_FREEZE = (1 << 5),
277
278 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
279 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
280 ATA_EH_RESUME | ATA_EH_PM_FREEZE,
281
282 /* ata_eh_info->flags */
283 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
284 ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */
285 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
286 ATA_EHI_QUIET = (1 << 3), /* be quiet */
287
288 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */
289 ATA_EHI_PRINTINFO = (1 << 17), /* print configuration info */
290 ATA_EHI_SETMODE = (1 << 18), /* configure transfer mode */
291 ATA_EHI_POST_SETMODE = (1 << 19), /* revalidating after setmode */
292
293 ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,
294
295 /* max repeat if error condition is still set after ->error_handler */
296 ATA_EH_MAX_REPEAT = 5,
297
298 /* how hard are we gonna try to probe/recover devices */
299 ATA_PROBE_MAX_TRIES = 3,
300 ATA_EH_RESET_TRIES = 3,
301 ATA_EH_DEV_TRIES = 3,
302
303 /* Drive spinup time (time from power-on to the first D2H FIS)
304 * in msecs - 8s currently. Failing to get ready in this time
305 * isn't critical. It will result in reset failure for
306 * controllers which can't wait for the first D2H FIS. libata
307 * will retry, so it just has to be long enough to spin up
308 * most devices.
309 */
310 ATA_SPINUP_WAIT = 8000,
311
312 /* Horkage types. May be set by libata or controller on drives
313 (some horkage may be drive/controller pair dependent) */
314
315 ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
316 ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
317 ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
318 };
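/*
 * Packing sketch (illustrative, not from the original header): the per-type
 * transfer-mode masks are combined into one word with the shift/width
 * constants above, and extracted the same way, e.g.
 *
 *	unsigned int xfer_mask = (pio_mask << ATA_SHIFT_PIO) |
 *				 (mwdma_mask << ATA_SHIFT_MWDMA) |
 *				 (udma_mask << ATA_SHIFT_UDMA);
 *
 *	unsigned int udma_only = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
 */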
319
320 enum hsm_task_states {
321 HSM_ST_IDLE, /* no command ongoing */
322 HSM_ST, /* (waiting for the device to) transfer data */
323 HSM_ST_LAST, /* (waiting for the device to) complete command */
324 HSM_ST_ERR, /* error */
325 HSM_ST_FIRST, /* (waiting for the device to)
326 write CDB or first data block */
327 };
328
329 enum ata_completion_errors {
330 AC_ERR_DEV = (1 << 0), /* device reported error */
331 AC_ERR_HSM = (1 << 1), /* host state machine violation */
332 AC_ERR_TIMEOUT = (1 << 2), /* timeout */
333 AC_ERR_MEDIA = (1 << 3), /* media error */
334 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
335 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
336 AC_ERR_SYSTEM = (1 << 6), /* system error */
337 AC_ERR_INVALID = (1 << 7), /* invalid argument */
338 AC_ERR_OTHER = (1 << 8), /* unknown */
339 AC_ERR_NODEV_HINT = (1 << 9), /* polling device detection hint */
340 };
341
342 /* forward declarations */
343 struct scsi_device;
344 struct ata_port_operations;
345 struct ata_port;
346 struct ata_queued_cmd;
347
348 /* typedefs */
349 typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
350 typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
351 typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
352 typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);
353
354 struct ata_ioports {
355 unsigned long cmd_addr;
356 unsigned long data_addr;
357 unsigned long error_addr;
358 unsigned long feature_addr;
359 unsigned long nsect_addr;
360 unsigned long lbal_addr;
361 unsigned long lbam_addr;
362 unsigned long lbah_addr;
363 unsigned long device_addr;
364 unsigned long status_addr;
365 unsigned long command_addr;
366 unsigned long altstatus_addr;
367 unsigned long ctl_addr;
368 unsigned long bmdma_addr;
369 unsigned long scr_addr;
370 };
371
372 struct ata_probe_ent {
373 struct list_head node;
374 struct device *dev;
375 const struct ata_port_operations *port_ops;
376 struct scsi_host_template *sht;
377 struct ata_ioports port[ATA_MAX_PORTS];
378 unsigned int n_ports;
379 unsigned int dummy_port_mask;
380 unsigned int pio_mask;
381 unsigned int mwdma_mask;
382 unsigned int udma_mask;
383 unsigned long irq;
384 unsigned long irq2;
385 unsigned int irq_flags;
386 unsigned long port_flags;
387 unsigned long _host_flags;
388 void __iomem *mmio_base;
389 void *private_data;
390
391 /* port_info for the secondary port. Together with irq2, it's
392 * used to implement a non-uniform secondary port. Currently,
393 * the only user is ata_piix combined mode. This workaround
394 * will be removed together with ata_probe_ent when the init
395 * model is updated.
396 */
397 const struct ata_port_info *pinfo2;
398 };
399
400 struct ata_host {
401 spinlock_t lock;
402 struct device *dev;
403 unsigned long irq;
404 unsigned long irq2;
405 void __iomem *mmio_base;
406 unsigned int n_ports;
407 void *private_data;
408 const struct ata_port_operations *ops;
409 unsigned long flags;
410 int simplex_claimed; /* Keep separate in case we
411 ever need to do this locked */
412 struct ata_port *ports[0];
413 };
414
415 struct ata_queued_cmd {
416 struct ata_port *ap;
417 struct ata_device *dev;
418
419 struct scsi_cmnd *scsicmd;
420 void (*scsidone)(struct scsi_cmnd *);
421
422 struct ata_taskfile tf;
423 u8 cdb[ATAPI_CDB_LEN];
424
425 unsigned long flags; /* ATA_QCFLAG_xxx */
426 unsigned int tag;
427 unsigned int n_elem;
428 unsigned int orig_n_elem;
429
430 int dma_dir;
431
432 unsigned int pad_len;
433
434 unsigned int nsect;
435 unsigned int cursect;
436
437 unsigned int nbytes;
438 unsigned int curbytes;
439
440 unsigned int cursg;
441 unsigned int cursg_ofs;
442
443 struct scatterlist sgent;
444 struct scatterlist pad_sgent;
445 void *buf_virt;
446
447 /* DO NOT iterate over __sg manually, use ata_for_each_sg() */
448 struct scatterlist *__sg;
449
450 unsigned int err_mask;
451 struct ata_taskfile result_tf;
452 ata_qc_cb_t complete_fn;
453
454 void *private_data;
455 };
456
457 struct ata_port_stats {
458 unsigned long unhandled_irq;
459 unsigned long idle_irq;
460 unsigned long rw_reqbuf;
461 };
462
463 struct ata_ering_entry {
464 int is_io;
465 unsigned int err_mask;
466 u64 timestamp;
467 };
468
469 struct ata_ering {
470 int cursor;
471 struct ata_ering_entry ring[ATA_ERING_SIZE];
472 };
473
474 struct ata_device {
475 struct ata_port *ap;
476 unsigned int devno; /* 0 or 1 */
477 unsigned long flags; /* ATA_DFLAG_xxx */
478 struct scsi_device *sdev; /* attached SCSI device */
479 /* n_sectors is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
480 u64 n_sectors; /* size of device, if ATA */
481 unsigned int class; /* ATA_DEV_xxx */
482 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
483 u8 pio_mode;
484 u8 dma_mode;
485 u8 xfer_mode;
486 unsigned int xfer_shift; /* ATA_SHIFT_xxx */
487
488 unsigned int multi_count; /* sectors count for
489 READ/WRITE MULTIPLE */
490 unsigned int max_sectors; /* per-device max sectors */
491 unsigned int cdb_len;
492
493 /* per-dev xfer mask */
494 unsigned int pio_mask;
495 unsigned int mwdma_mask;
496 unsigned int udma_mask;
497
498 /* for CHS addressing */
499 u16 cylinders; /* Number of cylinders */
500 u16 heads; /* Number of heads */
501 u16 sectors; /* Number of sectors per track */
502
503 /* error history */
504 struct ata_ering ering;
505 unsigned int horkage; /* List of broken features */
506 };
507
508 /* Offset into struct ata_device. Fields above it are maintained
509 * across device init. Fields below are zeroed.
510 */
511 #define ATA_DEVICE_CLEAR_OFFSET offsetof(struct ata_device, n_sectors)
512
513 struct ata_eh_info {
514 struct ata_device *dev; /* offending device */
515 u32 serror; /* SError from LLDD */
516 unsigned int err_mask; /* port-wide err_mask */
517 unsigned int action; /* ATA_EH_* action mask */
518 unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */
519 unsigned int flags; /* ATA_EHI_* flags */
520
521 unsigned long hotplug_timestamp;
522 unsigned int probe_mask;
523
524 char desc[ATA_EH_DESC_LEN];
525 int desc_len;
526 };
527
528 struct ata_eh_context {
529 struct ata_eh_info i;
530 int tries[ATA_MAX_DEVICES];
531 unsigned int classes[ATA_MAX_DEVICES];
532 unsigned int did_probe_mask;
533 };
534
535 struct ata_port {
536 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
537 const struct ata_port_operations *ops;
538 spinlock_t *lock;
539 unsigned long flags; /* ATA_FLAG_xxx */
540 unsigned int pflags; /* ATA_PFLAG_xxx */
541 unsigned int id; /* unique id req'd by scsi midlyr */
542 unsigned int port_no; /* unique port #; from zero */
543
544 struct ata_prd *prd; /* our SG list */
545 dma_addr_t prd_dma; /* and its DMA mapping */
546
547 void *pad; /* array of DMA pad buffers */
548 dma_addr_t pad_dma;
549
550 struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
551
552 u8 ctl; /* cache of ATA control register */
553 u8 last_ctl; /* Cache last written value */
554 unsigned int pio_mask;
555 unsigned int mwdma_mask;
556 unsigned int udma_mask;
557 unsigned int cbl; /* cable type; ATA_CBL_xxx */
558 unsigned int hw_sata_spd_limit;
559 unsigned int sata_spd_limit; /* SATA PHY speed limit */
560
561 /* record runtime error info, protected by host lock */
562 struct ata_eh_info eh_info;
563 /* EH context owned by EH */
564 struct ata_eh_context eh_context;
565
566 struct ata_device device[ATA_MAX_DEVICES];
567
568 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
569 unsigned long qc_allocated;
570 unsigned int qc_active;
571
572 unsigned int active_tag;
573 u32 sactive;
574
575 struct ata_port_stats stats;
576 struct ata_host *host;
577 struct device *dev;
578
579 void *port_task_data;
580 struct delayed_work port_task;
581 struct delayed_work hotplug_task;
582 struct work_struct scsi_rescan_task;
583
584 unsigned int hsm_task_state;
585
586 u32 msg_enable;
587 struct list_head eh_done_q;
588 wait_queue_head_t eh_wait_q;
589
590 pm_message_t pm_mesg;
591 int *pm_result;
592
593 void *private_data;
594
595 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */
596 };
597
598 struct ata_port_operations {
599 void (*port_disable) (struct ata_port *);
600
601 void (*dev_config) (struct ata_port *, struct ata_device *);
602
603 void (*set_piomode) (struct ata_port *, struct ata_device *);
604 void (*set_dmamode) (struct ata_port *, struct ata_device *);
605 unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);
606
607 void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
608 void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
609
610 void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
611 u8 (*check_status)(struct ata_port *ap);
612 u8 (*check_altstatus)(struct ata_port *ap);
613 void (*dev_select)(struct ata_port *ap, unsigned int device);
614
615 void (*phy_reset) (struct ata_port *ap); /* obsolete */
616 int (*set_mode) (struct ata_port *ap, struct ata_device **r_failed_dev);
617
618 void (*post_set_mode) (struct ata_port *ap);
619
620 int (*check_atapi_dma) (struct ata_queued_cmd *qc);
621
622 void (*bmdma_setup) (struct ata_queued_cmd *qc);
623 void (*bmdma_start) (struct ata_queued_cmd *qc);
624
625 void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
626
627 void (*qc_prep) (struct ata_queued_cmd *qc);
628 unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
629
630 /* Error handlers. ->error_handler overrides ->eng_timeout and
631 * indicates that new-style EH is in place.
632 */
633 void (*eng_timeout) (struct ata_port *ap); /* obsolete */
634
635 void (*freeze) (struct ata_port *ap);
636 void (*thaw) (struct ata_port *ap);
637 void (*error_handler) (struct ata_port *ap);
638 void (*post_internal_cmd) (struct ata_queued_cmd *qc);
639
640 irq_handler_t irq_handler;
641 void (*irq_clear) (struct ata_port *);
642
643 u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
644 void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
645 u32 val);
646
647 int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
648 int (*port_resume) (struct ata_port *ap);
649
650 int (*port_start) (struct ata_port *ap);
651 void (*port_stop) (struct ata_port *ap);
652
653 void (*host_stop) (struct ata_host *host);
654
655 void (*bmdma_stop) (struct ata_queued_cmd *qc);
656 u8 (*bmdma_status) (struct ata_port *ap);
657 };
658
659 struct ata_port_info {
660 struct scsi_host_template *sht;
661 unsigned long flags;
662 unsigned long pio_mask;
663 unsigned long mwdma_mask;
664 unsigned long udma_mask;
665 const struct ata_port_operations *port_ops;
666 void *private_data;
667 };
668
669 struct ata_timing {
670 unsigned short mode; /* ATA mode */
671 unsigned short setup; /* t1 */
672 unsigned short act8b; /* t2 for 8-bit I/O */
673 unsigned short rec8b; /* t2i for 8-bit I/O */
674 unsigned short cyc8b; /* t0 for 8-bit I/O */
675 unsigned short active; /* t2 or tD */
676 unsigned short recover; /* t2i or tK */
677 unsigned short cycle; /* t0 */
678 unsigned short udma; /* t2CYCTYP/2 */
679 };
680
681 #define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin)
682
683 extern const unsigned long sata_deb_timing_normal[];
684 extern const unsigned long sata_deb_timing_hotplug[];
685 extern const unsigned long sata_deb_timing_long[];
686
687 extern const struct ata_port_operations ata_dummy_port_ops;
688
689 static inline const unsigned long *
690 sata_ehc_deb_timing(struct ata_eh_context *ehc)
691 {
692 if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
693 return sata_deb_timing_hotplug;
694 else
695 return sata_deb_timing_normal;
696 }
697
698 static inline int ata_port_is_dummy(struct ata_port *ap)
699 {
700 return ap->ops == &ata_dummy_port_ops;
701 }
702
703 extern void ata_port_probe(struct ata_port *);
704 extern void __sata_phy_reset(struct ata_port *ap);
705 extern void sata_phy_reset(struct ata_port *ap);
706 extern void ata_bus_reset(struct ata_port *ap);
707 extern int sata_set_spd(struct ata_port *ap);
708 extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
709 extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
710 extern int ata_std_prereset(struct ata_port *ap);
711 extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
712 extern int sata_port_hardreset(struct ata_port *ap,
713 const unsigned long *timing);
714 extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
715 extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
716 extern void ata_port_disable(struct ata_port *);
717 extern void ata_std_ports(struct ata_ioports *ioaddr);
718 #ifdef CONFIG_PCI
719 extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
720 unsigned int n_ports);
721 extern void ata_pci_remove_one (struct pci_dev *pdev);
722 extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
723 extern void ata_pci_device_do_resume(struct pci_dev *pdev);
724 extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
725 extern int ata_pci_device_resume(struct pci_dev *pdev);
726 extern int ata_pci_clear_simplex(struct pci_dev *pdev);
727 #endif /* CONFIG_PCI */
728 extern int ata_device_add(const struct ata_probe_ent *ent);
729 extern void ata_port_detach(struct ata_port *ap);
730 extern void ata_host_init(struct ata_host *, struct device *,
731 unsigned long, const struct ata_port_operations *);
732 extern void ata_host_remove(struct ata_host *host);
733 extern int ata_scsi_detect(struct scsi_host_template *sht);
734 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
735 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
736 extern int ata_scsi_release(struct Scsi_Host *host);
737 extern void ata_sas_port_destroy(struct ata_port *);
738 extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
739 struct ata_port_info *, struct Scsi_Host *);
740 extern int ata_sas_port_init(struct ata_port *);
741 extern int ata_sas_port_start(struct ata_port *ap);
742 extern void ata_sas_port_stop(struct ata_port *ap);
743 extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
744 extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
745 struct ata_port *ap);
746 extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
747 extern int sata_scr_valid(struct ata_port *ap);
748 extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
749 extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
750 extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
751 extern int ata_port_online(struct ata_port *ap);
752 extern int ata_port_offline(struct ata_port *ap);
753 extern int ata_scsi_device_resume(struct scsi_device *);
754 extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t mesg);
755 extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
756 extern void ata_host_resume(struct ata_host *host);
757 extern int ata_ratelimit(void);
758 extern int ata_busy_sleep(struct ata_port *ap,
759 unsigned long timeout_pat, unsigned long timeout);
760 extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
761 void *data, unsigned long delay);
762 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
763 unsigned long interval_msec,
764 unsigned long timeout_msec);
765
766 /*
767 * Default driver ops implementations
768 */
769 extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
770 extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
771 extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
772 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
773 extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
774 extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
775 extern u8 ata_check_status(struct ata_port *ap);
776 extern u8 ata_altstatus(struct ata_port *ap);
777 extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
778 extern int ata_port_start (struct ata_port *ap);
779 extern void ata_port_stop (struct ata_port *ap);
780 extern void ata_host_stop (struct ata_host *host);
781 extern irqreturn_t ata_interrupt (int irq, void *dev_instance);
782 extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
783 unsigned int buflen, int write_data);
784 extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
785 unsigned int buflen, int write_data);
786 extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
787 unsigned int buflen, int write_data);
788 extern void ata_qc_prep(struct ata_queued_cmd *qc);
789 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
790 extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
791 extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
792 unsigned int buflen);
793 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
794 unsigned int n_elem);
795 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
796 extern void ata_id_string(const u16 *id, unsigned char *s,
797 unsigned int ofs, unsigned int len);
798 extern void ata_id_c_string(const u16 *id, unsigned char *s,
799 unsigned int ofs, unsigned int len);
800 extern unsigned long ata_device_blacklisted(const struct ata_device *dev);
801 extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
802 extern void ata_bmdma_start (struct ata_queued_cmd *qc);
803 extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
804 extern u8 ata_bmdma_status(struct ata_port *ap);
805 extern void ata_bmdma_irq_clear(struct ata_port *ap);
806 extern void ata_bmdma_freeze(struct ata_port *ap);
807 extern void ata_bmdma_thaw(struct ata_port *ap);
808 extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
809 ata_reset_fn_t softreset,
810 ata_reset_fn_t hardreset,
811 ata_postreset_fn_t postreset);
812 extern void ata_bmdma_error_handler(struct ata_port *ap);
813 extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
814 extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
815 u8 status, int in_wq);
816 extern void ata_qc_complete(struct ata_queued_cmd *qc);
817 extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
818 void (*finish_qc)(struct ata_queued_cmd *));
819 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
820 void (*done)(struct scsi_cmnd *));
821 extern int ata_std_bios_param(struct scsi_device *sdev,
822 struct block_device *bdev,
823 sector_t capacity, int geom[]);
824 extern int ata_scsi_slave_config(struct scsi_device *sdev);
825 extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
826 extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
827 int queue_depth);
828 extern struct ata_device *ata_dev_pair(struct ata_device *adev);
829
830 /*
831 * Timing helpers
832 */
833
834 extern unsigned int ata_pio_need_iordy(const struct ata_device *);
835 extern int ata_timing_compute(struct ata_device *, unsigned short,
836 struct ata_timing *, int, int);
837 extern void ata_timing_merge(const struct ata_timing *,
838 const struct ata_timing *, struct ata_timing *,
839 unsigned int);
840
841 enum {
842 ATA_TIMING_SETUP = (1 << 0),
843 ATA_TIMING_ACT8B = (1 << 1),
844 ATA_TIMING_REC8B = (1 << 2),
845 ATA_TIMING_CYC8B = (1 << 3),
846 ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
847 ATA_TIMING_CYC8B,
848 ATA_TIMING_ACTIVE = (1 << 4),
849 ATA_TIMING_RECOVER = (1 << 5),
850 ATA_TIMING_CYCLE = (1 << 6),
851 ATA_TIMING_UDMA = (1 << 7),
852 ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
853 ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
854 ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
855 ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
856 };
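/*
 * Illustrative sketch (not part of the original header): a PATA LLD's
 * ->set_piomode() typically asks for the timing of the selected mode and
 * programs its registers from the resulting struct ata_timing.  The T/UT
 * clock arguments (30/20 ns below) are placeholders for the controller's
 * actual clock periods.
 *
 *	struct ata_timing t;
 *
 *	if (ata_timing_compute(adev, adev->pio_mode, &t, 30, 20) < 0)
 *		return;
 *	// program the controller from t.setup, t.act8b, t.rec8b, t.cyc8b, ...
 */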
857
858
859 #ifdef CONFIG_PCI
860 struct pci_bits {
861 unsigned int reg; /* PCI config register to read */
862 unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
863 unsigned long mask;
864 unsigned long val;
865 };
866
867 extern void ata_pci_host_stop (struct ata_host *host);
868 extern struct ata_probe_ent *
869 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
870 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
871 extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
872 #endif /* CONFIG_PCI */
873
874 /*
875 * EH
876 */
877 extern void ata_eng_timeout(struct ata_port *ap);
878
879 extern void ata_port_schedule_eh(struct ata_port *ap);
880 extern int ata_port_abort(struct ata_port *ap);
881 extern int ata_port_freeze(struct ata_port *ap);
882
883 extern void ata_eh_freeze_port(struct ata_port *ap);
884 extern void ata_eh_thaw_port(struct ata_port *ap);
885
886 extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
887 extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
888
889 extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
890 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
891 ata_postreset_fn_t postreset);
892
893 /*
894 * printk helpers
895 */
896 #define ata_port_printk(ap, lv, fmt, args...) \
897 printk(lv"ata%u: "fmt, (ap)->id , ##args)
898
899 #define ata_dev_printk(dev, lv, fmt, args...) \
900 printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
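/*
 * Illustrative usage (not part of the original header): the "lv" argument
 * is a KERN_* level string that is pasted onto the format.
 *
 *	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed\n");
 *	ata_dev_printk(dev, KERN_INFO, "configured for %s\n", "UDMA/100");
 */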
901
902 /*
903 * ata_eh_info helpers
904 */
905 #define ata_ehi_push_desc(ehi, fmt, args...) do { \
906 (ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
907 ATA_EH_DESC_LEN - (ehi)->desc_len, \
908 fmt , ##args); \
909 } while (0)
910
911 #define ata_ehi_clear_desc(ehi) do { \
912 (ehi)->desc[0] = '\0'; \
913 (ehi)->desc_len = 0; \
914 } while (0)
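/*
 * Illustrative sketch (not part of the original header): an interrupt or
 * error path, holding the host lock, typically records a description and
 * schedules EH roughly like this ("serror" is a hypothetical local holding
 * the SError value).
 *
 *	struct ata_eh_info *ehi = &ap->eh_info;
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "SError 0x%08x", serror);
 *	ehi->err_mask |= AC_ERR_ATA_BUS;
 *	ata_port_schedule_eh(ap);
 */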
915
916 static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
917 {
918 if (ehi->flags & ATA_EHI_HOTPLUGGED)
919 return;
920
921 ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
922 ehi->hotplug_timestamp = jiffies;
923
924 ehi->action |= ATA_EH_SOFTRESET;
925 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
926 }
927
928 static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
929 {
930 __ata_ehi_hotplugged(ehi);
931 ehi->err_mask |= AC_ERR_ATA_BUS;
932 }
933
934 /*
935 * qc helpers
936 */
937 static inline int
938 ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
939 {
940 if (sg == &qc->pad_sgent)
941 return 1;
942 if (qc->pad_len)
943 return 0;
944 if (((sg - qc->__sg) + 1) == qc->n_elem)
945 return 1;
946 return 0;
947 }
948
949 static inline struct scatterlist *
950 ata_qc_first_sg(struct ata_queued_cmd *qc)
951 {
952 if (qc->n_elem)
953 return qc->__sg;
954 if (qc->pad_len)
955 return &qc->pad_sgent;
956 return NULL;
957 }
958
959 static inline struct scatterlist *
960 ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
961 {
962 if (sg == &qc->pad_sgent)
963 return NULL;
964 if (++sg - qc->__sg < qc->n_elem)
965 return sg;
966 if (qc->pad_len)
967 return &qc->pad_sgent;
968 return NULL;
969 }
970
971 #define ata_for_each_sg(sg, qc) \
972 for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
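/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * ->qc_prep() walks the scatterlist only via ata_for_each_sg(), which also
 * covers the pad entry, never over qc->__sg directly.
 *
 *	static void my_qc_prep(struct ata_queued_cmd *qc)
 *	{
 *		struct scatterlist *sg;
 *
 *		if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 *			return;
 *
 *		ata_for_each_sg(sg, qc) {
 *			dma_addr_t addr = sg_dma_address(sg);
 *			u32 len = sg_dma_len(sg);
 *			// fill one hardware PRD entry with addr/len here
 *		}
 *	}
 */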
973
974 static inline unsigned int ata_tag_valid(unsigned int tag)
975 {
976 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
977 }
978
979 static inline unsigned int ata_tag_internal(unsigned int tag)
980 {
981 return tag == ATA_MAX_QUEUE - 1;
982 }
983
984 /*
985 * device helpers
986 */
987 static inline unsigned int ata_class_enabled(unsigned int class)
988 {
989 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
990 }
991
992 static inline unsigned int ata_class_disabled(unsigned int class)
993 {
994 return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
995 }
996
997 static inline unsigned int ata_class_absent(unsigned int class)
998 {
999 return !ata_class_enabled(class) && !ata_class_disabled(class);
1000 }
1001
1002 static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
1003 {
1004 return ata_class_enabled(dev->class);
1005 }
1006
1007 static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
1008 {
1009 return ata_class_disabled(dev->class);
1010 }
1011
1012 static inline unsigned int ata_dev_absent(const struct ata_device *dev)
1013 {
1014 return ata_class_absent(dev->class);
1015 }
1016
1017 static inline unsigned int ata_dev_ready(const struct ata_device *dev)
1018 {
1019 return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
1020 }
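/*
 * Illustrative sketch (not part of the original header): LLD code commonly
 * loops over the (at most ATA_MAX_DEVICES) devices on a port and skips
 * ones that are not enabled.
 *
 *	int i;
 *
 *	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 *		struct ata_device *dev = &ap->device[i];
 *
 *		if (!ata_dev_enabled(dev))
 *			continue;
 *		// configure transfer mode for dev here
 *	}
 */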
1021
1022 /*
1023 * port helpers
1024 */
1025 static inline int ata_port_max_devices(const struct ata_port *ap)
1026 {
1027 if (ap->flags & ATA_FLAG_SLAVE_POSS)
1028 return 2;
1029 return 1;
1030 }
1031
1032
1033 static inline u8 ata_chk_status(struct ata_port *ap)
1034 {
1035 return ap->ops->check_status(ap);
1036 }
1037
1038
1039 /**
1040 * ata_pause - Flush writes and pause 400 nanoseconds.
1041 * @ap: Port to wait for.
1042 *
1043 * LOCKING:
1044 * Inherited from caller.
1045 */
1046
1047 static inline void ata_pause(struct ata_port *ap)
1048 {
1049 ata_altstatus(ap);
1050 ndelay(400);
1051 }
1052
1053
1054 /**
1055 * ata_busy_wait - Wait for a port status register
1056 * @ap: Port to wait for.
1057 * @bits: bits that must be clear
1058 * @max: number of 10uS waits to perform
1059 *
1060 * Waits up to max*10 microseconds for the selected bits in the port's
1061 * status register to be cleared.
1062 * Returns final value of status register.
1063 *
1064 * LOCKING:
1065 * Inherited from caller.
1066 */
1067
1068 static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
1069 unsigned int max)
1070 {
1071 u8 status;
1072
1073 do {
1074 udelay(10);
1075 status = ata_chk_status(ap);
1076 max--;
1077 } while (status != 0xff && (status & bits) && (max > 0));
1078
1079 return status;
1080 }
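/*
 * Illustrative usage (not part of the original header): a typical BSY
 * poll; with ATA_DEF_BUSY_WAIT (10000) iterations of 10 us each this
 * allows roughly 100 ms before giving up.
 *
 *	u8 status = ata_busy_wait(ap, ATA_BUSY, ATA_DEF_BUSY_WAIT);
 *
 *	if (status & ATA_BUSY)
 *		ata_port_printk(ap, KERN_WARNING, "device busy too long\n");
 */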
1081
1082
1083 /**
1084 * ata_wait_idle - Wait for a port to be idle.
1085 * @ap: Port to wait for.
1086 *
1087 * Waits up to 10ms for port's BUSY and DRQ signals to clear.
1088 * Returns final value of status register.
1089 *
1090 * LOCKING:
1091 * Inherited from caller.
1092 */
1093
1094 static inline u8 ata_wait_idle(struct ata_port *ap)
1095 {
1096 u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
1097
1098 if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) {
1099 unsigned long l = ap->ioaddr.status_addr;
1100 if (ata_msg_warn(ap))
1101 printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
1102 status, l);
1103 }
1104
1105 return status;
1106 }
1107
1108 static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
1109 {
1110 qc->tf.ctl |= ATA_NIEN;
1111 }
1112
1113 static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
1114 unsigned int tag)
1115 {
1116 if (likely(ata_tag_valid(tag)))
1117 return &ap->qcmd[tag];
1118 return NULL;
1119 }
1120
1121 static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
1122 unsigned int tag)
1123 {
1124 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1125
1126 if (unlikely(!qc) || !ap->ops->error_handler)
1127 return qc;
1128
1129 if ((qc->flags & (ATA_QCFLAG_ACTIVE |
1130 ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
1131 return qc;
1132
1133 return NULL;
1134 }
1135
1136 static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
1137 {
1138 memset(tf, 0, sizeof(*tf));
1139
1140 tf->ctl = dev->ap->ctl;
1141 if (dev->devno == 0)
1142 tf->device = ATA_DEVICE_OBS;
1143 else
1144 tf->device = ATA_DEVICE_OBS | ATA_DEV1;
1145 }
1146
1147 static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
1148 {
1149 qc->dma_dir = DMA_NONE;
1150 qc->__sg = NULL;
1151 qc->flags = 0;
1152 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
1153 qc->nsect = 0;
1154 qc->nbytes = qc->curbytes = 0;
1155 qc->n_elem = 0;
1156 qc->err_mask = 0;
1157 qc->pad_len = 0;
1158
1159 ata_tf_init(qc->dev, &qc->tf);
1160
1161 /* init result_tf such that it indicates normal completion */
1162 qc->result_tf.command = ATA_DRDY;
1163 qc->result_tf.feature = 0;
1164 }
1165
1166 /**
1167 * ata_irq_ack - Acknowledge a device interrupt.
1168 * @ap: Port on which interrupts are enabled.
1169 *
1170 * Wait up to 10 ms for legacy IDE device to become idle (BUSY
1171 * or BUSY+DRQ clear). Obtain dma status and port status from
1172 * device. Clear the interrupt. Return port status.
1173 *
1174 * LOCKING:
1175 */
1176
1177 static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
1178 {
1179 unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
1180 u8 host_stat, post_stat, status;
1181
1182 status = ata_busy_wait(ap, bits, 1000);
1183 if (status & bits)
1184 if (ata_msg_err(ap))
1185 printk(KERN_ERR "abnormal status 0x%X\n", status);
1186
1187 /* get controller status; clear intr, err bits */
1188 if (ap->flags & ATA_FLAG_MMIO) {
1189 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
1190 host_stat = readb(mmio + ATA_DMA_STATUS);
1191 writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
1192 mmio + ATA_DMA_STATUS);
1193
1194 post_stat = readb(mmio + ATA_DMA_STATUS);
1195 } else {
1196 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1197 outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
1198 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1199
1200 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
1201 }
1202
1203 if (ata_msg_intr(ap))
1204 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
1205 __FUNCTION__,
1206 host_stat, post_stat, status);
1207
1208 return status;
1209 }
1210
1211 static inline int ata_try_flush_cache(const struct ata_device *dev)
1212 {
1213 return ata_id_wcache_enabled(dev->id) ||
1214 ata_id_has_flush(dev->id) ||
1215 ata_id_has_flush_ext(dev->id);
1216 }
1217
1218 static inline unsigned int ac_err_mask(u8 status)
1219 {
1220 if (status & (ATA_BUSY | ATA_DRQ))
1221 return AC_ERR_HSM;
1222 if (status & (ATA_ERR | ATA_DF))
1223 return AC_ERR_DEV;
1224 return 0;
1225 }
1226
1227 static inline unsigned int __ac_err_mask(u8 status)
1228 {
1229 unsigned int mask = ac_err_mask(status);
1230 if (mask == 0)
1231 return AC_ERR_OTHER;
1232 return mask;
1233 }
1234
1235 static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
1236 {
1237 ap->pad_dma = 0;
1238 ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
1239 &ap->pad_dma, GFP_KERNEL);
1240 return (ap->pad == NULL) ? -ENOMEM : 0;
1241 }
1242
1243 static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
1244 {
1245 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
1246 }
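/*
 * Illustrative sketch (not part of the original header): the DMA pad
 * buffer is usually allocated from an LLD's ->port_start() and released
 * from ->port_stop(); "my_port_start"/"my_port_stop" are hypothetical and
 * error handling is abbreviated.
 *
 *	static int my_port_start(struct ata_port *ap)
 *	{
 *		struct device *dev = ap->host->dev;
 *		int rc;
 *
 *		rc = ata_pad_alloc(ap, dev);
 *		if (rc)
 *			return rc;
 *		// allocate controller-private per-port state here
 *		return 0;
 *	}
 *
 *	static void my_port_stop(struct ata_port *ap)
 *	{
 *		ata_pad_free(ap, ap->host->dev);
 *	}
 */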
1247
1248 static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
1249 {
1250 return (struct ata_port *) &host->hostdata[0];
1251 }
1252
1253 #endif /* __LINUX_LIBATA_H__ */