]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/ata/sata_dwc_460ex.c
sata_dwc_460ex: join messages back
[mirror_ubuntu-bionic-kernel.git] / drivers / ata / sata_dwc_460ex.c
CommitLineData
62936009
RS
1/*
2 * drivers/ata/sata_dwc_460ex.c
3 *
4 * Synopsys DesignWare Cores (DWC) SATA host driver
5 *
6 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
7 *
8 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
9 * Copyright 2008 DENX Software Engineering
10 *
11 * Based on versions provided by AMCC and Synopsys which are:
12 * Copyright 2006 Applied Micro Circuits Corporation
13 * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 */
20
21#ifdef CONFIG_SATA_DWC_DEBUG
22#define DEBUG
23#endif
24
25#ifdef CONFIG_SATA_DWC_VDEBUG
26#define VERBOSE_DEBUG
27#define DEBUG_NCQ
28#endif
29
30#include <linux/kernel.h>
31#include <linux/module.h>
62936009 32#include <linux/device.h>
c11eede6
RH
33#include <linux/of_address.h>
34#include <linux/of_irq.h>
62936009
RS
35#include <linux/of_platform.h>
36#include <linux/platform_device.h>
37#include <linux/libata.h>
38#include <linux/slab.h>
39#include "libata.h"
40
41#include <scsi/scsi_host.h>
42#include <scsi/scsi_cmnd.h>
43
c211962d
SS
44/* These two are defined in "libata.h" */
45#undef DRV_NAME
46#undef DRV_VERSION
72d5f2da 47
62936009 48#define DRV_NAME "sata-dwc"
84b47e3b 49#define DRV_VERSION "1.3"
62936009 50
84683a7e
AS
51#ifndef out_le32
52#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (void __iomem *)(a))
53#endif
54
55#ifndef in_le32
56#define in_le32(a) __le32_to_cpu(__raw_readl((void __iomem *)(a)))
57#endif
58
59#ifndef NO_IRQ
60#define NO_IRQ 0
61#endif
62
62936009
RS
63/* SATA DMA driver Globals */
64#define DMA_NUM_CHANS 1
65#define DMA_NUM_CHAN_REGS 8
66
67/* SATA DMA Register definitions */
68#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/
69
70struct dmareg {
71 u32 low; /* Low bits 0-31 */
72 u32 high; /* High bits 32-63 */
73};
74
75/* DMA Per Channel registers */
76struct dma_chan_regs {
77 struct dmareg sar; /* Source Address */
78 struct dmareg dar; /* Destination address */
79 struct dmareg llp; /* Linked List Pointer */
80 struct dmareg ctl; /* Control */
81 struct dmareg sstat; /* Source Status not implemented in core */
82 struct dmareg dstat; /* Destination Status not implemented in core*/
83 struct dmareg sstatar; /* Source Status Address not impl in core */
84 struct dmareg dstatar; /* Destination Status Address not implemente */
85 struct dmareg cfg; /* Config */
86 struct dmareg sgr; /* Source Gather */
87 struct dmareg dsr; /* Destination Scatter */
88};
89
90/* Generic Interrupt Registers */
91struct dma_interrupt_regs {
92 struct dmareg tfr; /* Transfer Interrupt */
93 struct dmareg block; /* Block Interrupt */
94 struct dmareg srctran; /* Source Transfer Interrupt */
95 struct dmareg dsttran; /* Dest Transfer Interrupt */
96 struct dmareg error; /* Error */
97};
98
99struct ahb_dma_regs {
100 struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
101 struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */
102 struct dma_interrupt_regs interrupt_status; /* Interrupt Status */
103 struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */
104 struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */
105 struct dmareg statusInt; /* Interrupt combined*/
106 struct dmareg rq_srcreg; /* Src Trans Req */
107 struct dmareg rq_dstreg; /* Dst Trans Req */
108 struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req*/
109 struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req*/
110 struct dmareg rq_lst_srcreg; /* Last Src Trans Req*/
111 struct dmareg rq_lst_dstreg; /* Last Dst Trans Req*/
112 struct dmareg dma_cfg; /* DMA Config */
113 struct dmareg dma_chan_en; /* DMA Channel Enable*/
114 struct dmareg dma_id; /* DMA ID */
115 struct dmareg dma_test; /* DMA Test */
116 struct dmareg res1; /* reserved */
117 struct dmareg res2; /* reserved */
118 /*
119 * DMA Comp Params
120 * Param 6 = dma_param[0], Param 5 = dma_param[1],
121 * Param 4 = dma_param[2] ...
122 */
123 struct dmareg dma_params[6];
124};
125
126/* Data structure for linked list item */
127struct lli {
128 u32 sar; /* Source Address */
129 u32 dar; /* Destination address */
130 u32 llp; /* Linked List Pointer */
131 struct dmareg ctl; /* Control */
132 struct dmareg dstat; /* Destination Status */
133};
134
135enum {
136 SATA_DWC_DMAC_LLI_SZ = (sizeof(struct lli)),
137 SATA_DWC_DMAC_LLI_NUM = 256,
138 SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \
139 SATA_DWC_DMAC_LLI_NUM),
140 SATA_DWC_DMAC_TWIDTH_BYTES = 4,
141 SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \
142 SATA_DWC_DMAC_TWIDTH_BYTES),
143};
144
145/* DMA Register Operation Bits */
146enum {
147 DMA_EN = 0x00000001, /* Enable AHB DMA */
148 DMA_CTL_LLP_SRCEN = 0x10000000, /* Blk chain enable Src */
149 DMA_CTL_LLP_DSTEN = 0x08000000, /* Blk chain enable Dst */
150};
151
152#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */
153#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */
154 /* Enable channel */
155#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \
156 ((0x000000001 << (ch)) << 8))
157 /* Disable channel */
158#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8))
159 /* Transfer Type & Flow Controller */
160#define DMA_CTL_TTFC(type) (((type) & 0x7) << 20)
161#define DMA_CTL_SMS(num) (((num) & 0x3) << 25) /* Src Master Select */
162#define DMA_CTL_DMS(num) (((num) & 0x3) << 23)/* Dst Master Select */
163 /* Src Burst Transaction Length */
164#define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14)
165 /* Dst Burst Transaction Length */
166#define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11)
167 /* Source Transfer Width */
168#define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4)
169 /* Destination Transfer Width */
170#define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1)
171
172/* Assign HW handshaking interface (x) to destination / source peripheral */
173#define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11)
174#define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7)
dc7f71f4 175#define DMA_CFG_HW_CH_PRIOR(int_num) (((int_num) & 0xF) << 5)
62936009
RS
176#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
177
178/*
179 * This define is used to set block chaining disabled in the control low
180 * register. It is already in little endian format so it can be &'d dirctly.
181 * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
182 */
183enum {
184 DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7,
185 DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */
186 DMA_CTL_TTFC_M2P_PER = 0x00000003, /* Mem to per, peripheral cntr */
187 DMA_CTL_SINC_INC = 0x00000000, /* Source Address Increment */
188 DMA_CTL_SINC_DEC = 0x00000200,
189 DMA_CTL_SINC_NOCHANGE = 0x00000400,
190 DMA_CTL_DINC_INC = 0x00000000, /* Destination Address Increment */
191 DMA_CTL_DINC_DEC = 0x00000080,
192 DMA_CTL_DINC_NOCHANGE = 0x00000100,
193 DMA_CTL_INT_EN = 0x00000001, /* Interrupt Enable */
194
195/* Channel Configuration Register high bits */
196 DMA_CFG_FCMOD_REQ = 0x00000001, /* Flow Control - request based */
197 DMA_CFG_PROTCTL = (0x00000003 << 2),/* Protection Control */
198
199/* Channel Configuration Register low bits */
200 DMA_CFG_RELD_DST = 0x80000000, /* Reload Dest / Src Addr */
201 DMA_CFG_RELD_SRC = 0x40000000,
202 DMA_CFG_HS_SELSRC = 0x00000800, /* Software handshake Src/ Dest */
203 DMA_CFG_HS_SELDST = 0x00000400,
204 DMA_CFG_FIFOEMPTY = (0x00000001 << 9), /* FIFO Empty bit */
205
206/* Channel Linked List Pointer Register */
207 DMA_LLP_AHBMASTER1 = 0, /* List Master Select */
208 DMA_LLP_AHBMASTER2 = 1,
209
210 SATA_DWC_MAX_PORTS = 1,
211
212 SATA_DWC_SCR_OFFSET = 0x24,
213 SATA_DWC_REG_OFFSET = 0x64,
214};
215
216/* DWC SATA Registers */
217struct sata_dwc_regs {
218 u32 fptagr; /* 1st party DMA tag */
219 u32 fpbor; /* 1st party DMA buffer offset */
220 u32 fptcr; /* 1st party DMA Xfr count */
221 u32 dmacr; /* DMA Control */
222 u32 dbtsr; /* DMA Burst Transac size */
223 u32 intpr; /* Interrupt Pending */
224 u32 intmr; /* Interrupt Mask */
225 u32 errmr; /* Error Mask */
226 u32 llcr; /* Link Layer Control */
227 u32 phycr; /* PHY Control */
228 u32 physr; /* PHY Status */
229 u32 rxbistpd; /* Recvd BIST pattern def register */
230 u32 rxbistpd1; /* Recvd BIST data dword1 */
231 u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
232 u32 txbistpd; /* Trans BIST pattern def register */
233 u32 txbistpd1; /* Trans BIST data dword1 */
234 u32 txbistpd2; /* Trans BIST data dword2 */
235 u32 bistcr; /* BIST Control Register */
236 u32 bistfctr; /* BIST FIS Count Register */
237 u32 bistsr; /* BIST Status Register */
238 u32 bistdecr; /* BIST Dword Error count register */
239 u32 res[15]; /* Reserved locations */
240 u32 testr; /* Test Register */
241 u32 versionr; /* Version Register */
242 u32 idr; /* ID Register */
243 u32 unimpl[192]; /* Unimplemented */
244 u32 dmadr[256]; /* FIFO Locations in DMA Mode */
245};
246
247enum {
248 SCR_SCONTROL_DET_ENABLE = 0x00000001,
249 SCR_SSTATUS_DET_PRESENT = 0x00000001,
250 SCR_SERROR_DIAG_X = 0x04000000,
251/* DWC SATA Register Operations */
252 SATA_DWC_TXFIFO_DEPTH = 0x01FF,
253 SATA_DWC_RXFIFO_DEPTH = 0x01FF,
254 SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
255 SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
256 SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
257 SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,
258 SATA_DWC_INTPR_DMAT = 0x00000001,
259 SATA_DWC_INTPR_NEWFP = 0x00000002,
260 SATA_DWC_INTPR_PMABRT = 0x00000004,
261 SATA_DWC_INTPR_ERR = 0x00000008,
262 SATA_DWC_INTPR_NEWBIST = 0x00000010,
263 SATA_DWC_INTPR_IPF = 0x10000000,
264 SATA_DWC_INTMR_DMATM = 0x00000001,
265 SATA_DWC_INTMR_NEWFPM = 0x00000002,
266 SATA_DWC_INTMR_PMABRTM = 0x00000004,
267 SATA_DWC_INTMR_ERRM = 0x00000008,
268 SATA_DWC_INTMR_NEWBISTM = 0x00000010,
269 SATA_DWC_LLCR_SCRAMEN = 0x00000001,
270 SATA_DWC_LLCR_DESCRAMEN = 0x00000002,
271 SATA_DWC_LLCR_RPDEN = 0x00000004,
272/* This is all error bits, zero's are reserved fields. */
273 SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
274};
275
276#define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F)
277#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\
278 SATA_DWC_DMACR_TMOD_TXCHEN)
279#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\
280 SATA_DWC_DMACR_TMOD_TXCHEN)
281#define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH)
282#define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
283 << 16)
284struct sata_dwc_device {
285 struct device *dev; /* generic device struct */
286 struct ata_probe_ent *pe; /* ptr to probe-ent */
287 struct ata_host *host;
d7c256e8 288 u8 __iomem *reg_base;
62936009
RS
289 struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
290 int irq_dma;
291};
292
293#define SATA_DWC_QCMD_MAX 32
294
295struct sata_dwc_device_port {
296 struct sata_dwc_device *hsdev;
297 int cmd_issued[SATA_DWC_QCMD_MAX];
298 struct lli *llit[SATA_DWC_QCMD_MAX]; /* DMA LLI table */
299 dma_addr_t llit_dma[SATA_DWC_QCMD_MAX];
300 u32 dma_chan[SATA_DWC_QCMD_MAX];
301 int dma_pending[SATA_DWC_QCMD_MAX];
302};
303
304/*
305 * Commonly used DWC SATA driver Macros
306 */
307#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\
308 (host)->private_data)
309#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\
310 (ap)->host->private_data)
311#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\
312 (ap)->private_data)
313#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\
314 (qc)->ap->host->private_data)
315#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\
316 (hsdevp)->hsdev)
317
318enum {
319 SATA_DWC_CMD_ISSUED_NOT = 0,
320 SATA_DWC_CMD_ISSUED_PEND = 1,
321 SATA_DWC_CMD_ISSUED_EXEC = 2,
322 SATA_DWC_CMD_ISSUED_NODATA = 3,
323
324 SATA_DWC_DMA_PENDING_NONE = 0,
325 SATA_DWC_DMA_PENDING_TX = 1,
326 SATA_DWC_DMA_PENDING_RX = 2,
327};
328
329struct sata_dwc_host_priv {
330 void __iomem *scr_addr_sstatus;
331 u32 sata_dwc_sactive_issued ;
332 u32 sata_dwc_sactive_queued ;
333 u32 dma_interrupt_count;
334 struct ahb_dma_regs *sata_dma_regs;
335 struct device *dwc_dev;
dc7f71f4 336 int dma_channel;
62936009 337};
d7c256e8
AS
338
339static struct sata_dwc_host_priv host_pvt;
340
62936009
RS
341/*
342 * Prototypes
343 */
344static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
345static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
346 u32 check_status);
347static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
348static void sata_dwc_port_stop(struct ata_port *ap);
349static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
350static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq);
351static void dma_dwc_exit(struct sata_dwc_device *hsdev);
352static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
353 struct lli *lli, dma_addr_t dma_lli,
354 void __iomem *addr, int dir);
355static void dma_dwc_xfer_start(int dma_ch);
356
84b47e3b
SS
357static const char *get_prot_descript(u8 protocol)
358{
359 switch ((enum ata_tf_protocols)protocol) {
360 case ATA_PROT_NODATA:
361 return "ATA no data";
362 case ATA_PROT_PIO:
363 return "ATA PIO";
364 case ATA_PROT_DMA:
365 return "ATA DMA";
366 case ATA_PROT_NCQ:
367 return "ATA NCQ";
368 case ATAPI_PROT_NODATA:
369 return "ATAPI no data";
370 case ATAPI_PROT_PIO:
371 return "ATAPI PIO";
372 case ATAPI_PROT_DMA:
373 return "ATAPI DMA";
374 default:
375 return "unknown";
376 }
377}
378
379static const char *get_dma_dir_descript(int dma_dir)
380{
381 switch ((enum dma_data_direction)dma_dir) {
382 case DMA_BIDIRECTIONAL:
383 return "bidirectional";
384 case DMA_TO_DEVICE:
385 return "to device";
386 case DMA_FROM_DEVICE:
387 return "from device";
388 default:
389 return "none";
390 }
391}
392
62936009
RS
393static void sata_dwc_tf_dump(struct ata_taskfile *tf)
394{
d578514b
AS
395 dev_vdbg(host_pvt.dwc_dev,
396 "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n",
397 tf->command, get_prot_descript(tf->protocol), tf->flags,
398 tf->device);
399 dev_vdbg(host_pvt.dwc_dev,
400 "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n",
401 tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah);
402 dev_vdbg(host_pvt.dwc_dev,
403 "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
62936009
RS
404 tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
405 tf->hob_lbah);
406}
407
408/*
409 * Function: get_burst_length_encode
410 * arguments: datalength: length in bytes of data
8618ccd3 411 * returns value to be programmed in register corresponding to data length
62936009
RS
412 * This value is effectively the log(base 2) of the length
413 */
414static int get_burst_length_encode(int datalength)
415{
416 int items = datalength >> 2; /* div by 4 to get lword count */
417
418 if (items >= 64)
419 return 5;
420
421 if (items >= 32)
422 return 4;
423
424 if (items >= 16)
425 return 3;
426
427 if (items >= 8)
428 return 2;
429
430 if (items >= 4)
431 return 1;
432
433 return 0;
434}
435
436static void clear_chan_interrupts(int c)
437{
438 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low),
439 DMA_CHANNEL(c));
440 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low),
441 DMA_CHANNEL(c));
442 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low),
443 DMA_CHANNEL(c));
444 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low),
445 DMA_CHANNEL(c));
446 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low),
447 DMA_CHANNEL(c));
448}
449
450/*
451 * Function: dma_request_channel
452 * arguments: None
453 * returns channel number if available else -1
454 * This function assigns the next available DMA channel from the list to the
455 * requester
456 */
457static int dma_request_channel(void)
458{
dc7f71f4
TN
459 /* Check if the channel is not currently in use */
460 if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &
461 DMA_CHANNEL(host_pvt.dma_channel)))
462 return host_pvt.dma_channel;
463 dev_err(host_pvt.dwc_dev, "%s Channel %d is currently in use\n",
464 __func__, host_pvt.dma_channel);
62936009
RS
465 return -1;
466}
467
468/*
469 * Function: dma_dwc_interrupt
470 * arguments: irq, dev_id, pt_regs
471 * returns channel number if available else -1
472 * Interrupt Handler for DW AHB SATA DMA
473 */
474static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
475{
476 int chan;
477 u32 tfr_reg, err_reg;
478 unsigned long flags;
d5185d65 479 struct sata_dwc_device *hsdev = hsdev_instance;
62936009
RS
480 struct ata_host *host = (struct ata_host *)hsdev->host;
481 struct ata_port *ap;
482 struct sata_dwc_device_port *hsdevp;
483 u8 tag = 0;
484 unsigned int port = 0;
485
486 spin_lock_irqsave(&host->lock, flags);
487 ap = host->ports[port];
488 hsdevp = HSDEVP_FROM_AP(ap);
489 tag = ap->link.active_tag;
490
491 tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\
492 .low));
493 err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\
494 .low));
495
496 dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
497 tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
498
dc7f71f4
TN
499 chan = host_pvt.dma_channel;
500 if (chan >= 0) {
62936009
RS
501 /* Check for end-of-transfer interrupt. */
502 if (tfr_reg & DMA_CHANNEL(chan)) {
503 /*
504 * Each DMA command produces 2 interrupts. Only
505 * complete the command after both interrupts have been
506 * seen. (See sata_dwc_isr())
507 */
508 host_pvt.dma_interrupt_count++;
509 sata_dwc_clear_dmacr(hsdevp, tag);
510
511 if (hsdevp->dma_pending[tag] ==
512 SATA_DWC_DMA_PENDING_NONE) {
513 dev_err(ap->dev, "DMA not pending eot=0x%08x "
514 "err=0x%08x tag=0x%02x pending=%d\n",
515 tfr_reg, err_reg, tag,
516 hsdevp->dma_pending[tag]);
517 }
518
519 if ((host_pvt.dma_interrupt_count % 2) == 0)
520 sata_dwc_dma_xfer_complete(ap, 1);
521
522 /* Clear the interrupt */
523 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
524 .tfr.low),
525 DMA_CHANNEL(chan));
526 }
527
528 /* Check for error interrupt. */
529 if (err_reg & DMA_CHANNEL(chan)) {
530 /* TODO Need error handler ! */
531 dev_err(ap->dev, "error interrupt err_reg=0x%08x\n",
532 err_reg);
533
534 /* Clear the interrupt. */
535 out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
536 .error.low),
537 DMA_CHANNEL(chan));
538 }
539 }
540 spin_unlock_irqrestore(&host->lock, flags);
541 return IRQ_HANDLED;
542}
543
544/*
545 * Function: dma_request_interrupts
546 * arguments: hsdev
547 * returns status
548 * This function registers ISR for a particular DMA channel interrupt
549 */
550static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
551{
552 int retval = 0;
dc7f71f4 553 int chan = host_pvt.dma_channel;
62936009 554
dc7f71f4 555 if (chan >= 0) {
62936009
RS
556 /* Unmask error interrupt */
557 out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low,
558 DMA_ENABLE_CHAN(chan));
559
560 /* Unmask end-of-transfer interrupt */
561 out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low,
562 DMA_ENABLE_CHAN(chan));
563 }
564
565 retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev);
566 if (retval) {
567 dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n",
568 __func__, irq);
569 return -ENODEV;
570 }
571
572 /* Mark this interrupt as requested */
573 hsdev->irq_dma = irq;
574 return 0;
575}
576
577/*
578 * Function: map_sg_to_lli
579 * The Synopsis driver has a comment proposing that better performance
580 * is possible by only enabling interrupts on the last item in the linked list.
581 * However, it seems that could be a problem if an error happened on one of the
582 * first items. The transfer would halt, but no error interrupt would occur.
583 * Currently this function sets interrupts enabled for each linked list item:
584 * DMA_CTL_INT_EN.
585 */
586static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
587 struct lli *lli, dma_addr_t dma_lli,
588 void __iomem *dmadr_addr, int dir)
589{
590 int i, idx = 0;
591 int fis_len = 0;
592 dma_addr_t next_llp;
593 int bl;
dc7f71f4 594 int sms_val, dms_val;
62936009 595
dc7f71f4
TN
596 sms_val = 0;
597 dms_val = 1 + host_pvt.dma_channel;
d7c256e8
AS
598 dev_dbg(host_pvt.dwc_dev,
599 "%s: sg=%p nelem=%d lli=%p dma_lli=0x%pad dmadr=0x%p\n",
600 __func__, sg, num_elems, lli, &dma_lli, dmadr_addr);
62936009
RS
601
602 bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
603
604 for (i = 0; i < num_elems; i++, sg++) {
605 u32 addr, offset;
606 u32 sg_len, len;
607
608 addr = (u32) sg_dma_address(sg);
609 sg_len = sg_dma_len(sg);
610
611 dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len"
612 "=%d\n", __func__, i, addr, sg_len);
613
614 while (sg_len) {
615 if (idx >= SATA_DWC_DMAC_LLI_NUM) {
616 /* The LLI table is not large enough. */
617 dev_err(host_pvt.dwc_dev, "LLI table overrun "
618 "(idx=%d)\n", idx);
619 break;
620 }
621 len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
622 SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
623
624 offset = addr & 0xffff;
625 if ((offset + sg_len) > 0x10000)
626 len = 0x10000 - offset;
627
628 /*
629 * Make sure a LLI block is not created that will span
630 * 8K max FIS boundary. If the block spans such a FIS
631 * boundary, there is a chance that a DMA burst will
632 * cross that boundary -- this results in an error in
633 * the host controller.
634 */
635 if (fis_len + len > 8192) {
636 dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len="
637 "%d(0x%x) len=%d(0x%x)\n", fis_len,
638 fis_len, len, len);
639 len = 8192 - fis_len;
640 fis_len = 0;
641 } else {
642 fis_len += len;
643 }
644 if (fis_len == 8192)
645 fis_len = 0;
646
647 /*
648 * Set DMA addresses and lower half of control register
649 * based on direction.
650 */
651 if (dir == DMA_FROM_DEVICE) {
652 lli[idx].dar = cpu_to_le32(addr);
653 lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
654
655 lli[idx].ctl.low = cpu_to_le32(
656 DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
dc7f71f4
TN
657 DMA_CTL_SMS(sms_val) |
658 DMA_CTL_DMS(dms_val) |
62936009
RS
659 DMA_CTL_SRC_MSIZE(bl) |
660 DMA_CTL_DST_MSIZE(bl) |
661 DMA_CTL_SINC_NOCHANGE |
662 DMA_CTL_SRC_TRWID(2) |
663 DMA_CTL_DST_TRWID(2) |
664 DMA_CTL_INT_EN |
665 DMA_CTL_LLP_SRCEN |
666 DMA_CTL_LLP_DSTEN);
667 } else { /* DMA_TO_DEVICE */
668 lli[idx].sar = cpu_to_le32(addr);
669 lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
670
671 lli[idx].ctl.low = cpu_to_le32(
672 DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
dc7f71f4
TN
673 DMA_CTL_SMS(dms_val) |
674 DMA_CTL_DMS(sms_val) |
62936009
RS
675 DMA_CTL_SRC_MSIZE(bl) |
676 DMA_CTL_DST_MSIZE(bl) |
677 DMA_CTL_DINC_NOCHANGE |
678 DMA_CTL_SRC_TRWID(2) |
679 DMA_CTL_DST_TRWID(2) |
680 DMA_CTL_INT_EN |
681 DMA_CTL_LLP_SRCEN |
682 DMA_CTL_LLP_DSTEN);
683 }
684
685 dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: "
686 "0x%08x val: 0x%08x\n", __func__,
687 len, DMA_CTL_BLK_TS(len / 4));
688
689 /* Program the LLI CTL high register */
690 lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS\
691 (len / 4));
692
693 /* Program the next pointer. The next pointer must be
694 * the physical address, not the virtual address.
695 */
696 next_llp = (dma_lli + ((idx + 1) * sizeof(struct \
697 lli)));
698
699 /* The last 2 bits encode the list master select. */
700 next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
701
702 lli[idx].llp = cpu_to_le32(next_llp);
703 idx++;
704 sg_len -= len;
705 addr += len;
706 }
707 }
708
709 /*
710 * The last next ptr has to be zero and the last control low register
711 * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
712 * and destination enable) set back to 0 (disabled.) This is what tells
713 * the core that this is the last item in the linked list.
714 */
715 if (idx) {
716 lli[idx-1].llp = 0x00000000;
717 lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
718
719 /* Flush cache to memory */
720 dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
721 DMA_BIDIRECTIONAL);
722 }
723
724 return idx;
725}
726
727/*
728 * Function: dma_dwc_xfer_start
729 * arguments: Channel number
730 * Return : None
731 * Enables the DMA channel
732 */
733static void dma_dwc_xfer_start(int dma_ch)
734{
735 /* Enable the DMA channel */
736 out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low),
737 in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) |
738 DMA_ENABLE_CHAN(dma_ch));
739}
740
741static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
742 struct lli *lli, dma_addr_t dma_lli,
743 void __iomem *addr, int dir)
744{
745 int dma_ch;
746 int num_lli;
747 /* Acquire DMA channel */
748 dma_ch = dma_request_channel();
749 if (dma_ch == -1) {
750 dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n",
751 __func__);
752 return -EAGAIN;
753 }
754
755 /* Convert SG list to linked list of items (LLIs) for AHB DMA */
756 num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir);
757
758 dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:"
759 " 0x%0xlx addr: %p lli count: %d\n", __func__, sg, num_elems,
760 lli, (u32)dma_lli, addr, num_lli);
761
762 clear_chan_interrupts(dma_ch);
763
764 /* Program the CFG register. */
765 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high),
dc7f71f4 766 DMA_CFG_HW_HS_SRC(dma_ch) | DMA_CFG_HW_HS_DEST(dma_ch) |
62936009 767 DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
dc7f71f4
TN
768 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low),
769 DMA_CFG_HW_CH_PRIOR(dma_ch));
62936009
RS
770
771 /* Program the address of the linked list */
772 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low),
773 DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
774
775 /* Program the CTL register with src enable / dst enable */
776 out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
777 DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
d285e8bf 778 return dma_ch;
62936009
RS
779}
780
781/*
782 * Function: dma_dwc_exit
783 * arguments: None
784 * returns status
785 * This function exits the SATA DMA driver
786 */
787static void dma_dwc_exit(struct sata_dwc_device *hsdev)
788{
789 dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
04e506b5 790 if (host_pvt.sata_dma_regs) {
d7c256e8 791 iounmap((void __iomem *)host_pvt.sata_dma_regs);
04e506b5
VK
792 host_pvt.sata_dma_regs = NULL;
793 }
62936009 794
04e506b5 795 if (hsdev->irq_dma) {
62936009 796 free_irq(hsdev->irq_dma, hsdev);
04e506b5
VK
797 hsdev->irq_dma = 0;
798 }
62936009
RS
799}
800
801/*
802 * Function: dma_dwc_init
803 * arguments: hsdev
804 * returns status
805 * This function initializes the SATA DMA driver
806 */
807static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
808{
809 int err;
810
811 err = dma_request_interrupts(hsdev, irq);
812 if (err) {
813 dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
814 " %d\n", __func__, err);
4aaa7187 815 return err;
62936009
RS
816 }
817
818 /* Enabe DMA */
819 out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN);
820
821 dev_notice(host_pvt.dwc_dev, "DMA initialized\n");
822 dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n", host_pvt.\
823 sata_dma_regs);
824
825 return 0;
62936009
RS
826}
827
828static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
829{
830 if (scr > SCR_NOTIFICATION) {
831 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
832 __func__, scr);
833 return -EINVAL;
834 }
835
d7c256e8 836 *val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4));
62936009
RS
837 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
838 __func__, link->ap->print_id, scr, *val);
839
840 return 0;
841}
842
843static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
844{
845 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
846 __func__, link->ap->print_id, scr, val);
847 if (scr > SCR_NOTIFICATION) {
848 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
849 __func__, scr);
850 return -EINVAL;
851 }
d7c256e8 852 out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val);
62936009
RS
853
854 return 0;
855}
856
857static u32 core_scr_read(unsigned int scr)
858{
d7c256e8 859 return in_le32(host_pvt.scr_addr_sstatus + (scr * 4));
62936009
RS
860}
861
862static void core_scr_write(unsigned int scr, u32 val)
863{
d7c256e8 864 out_le32(host_pvt.scr_addr_sstatus + (scr * 4), val);
62936009
RS
865}
866
867static void clear_serror(void)
868{
869 u32 val;
870 val = core_scr_read(SCR_ERROR);
871 core_scr_write(SCR_ERROR, val);
62936009
RS
872}
873
874static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
875{
876 out_le32(&hsdev->sata_dwc_regs->intpr,
877 in_le32(&hsdev->sata_dwc_regs->intpr));
878}
879
880static u32 qcmd_tag_to_mask(u8 tag)
881{
882 return 0x00000001 << (tag & 0x1f);
883}
884
885/* See ahci.c */
886static void sata_dwc_error_intr(struct ata_port *ap,
887 struct sata_dwc_device *hsdev, uint intpr)
888{
889 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
890 struct ata_eh_info *ehi = &ap->link.eh_info;
891 unsigned int err_mask = 0, action = 0;
892 struct ata_queued_cmd *qc;
893 u32 serror;
894 u8 status, tag;
895 u32 err_reg;
896
897 ata_ehi_clear_desc(ehi);
898
899 serror = core_scr_read(SCR_ERROR);
900 status = ap->ops->sff_check_status(ap);
901
902 err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.\
903 low));
904 tag = ap->link.active_tag;
905
906 dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x "
907 "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n",
908 __func__, serror, intpr, status, host_pvt.dma_interrupt_count,
909 hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg);
910
911 /* Clear error register and interrupt bit */
912 clear_serror();
913 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
914
915 /* This is the only error happening now. TODO check for exact error */
916
917 err_mask |= AC_ERR_HOST_BUS;
918 action |= ATA_EH_RESET;
919
920 /* Pass this on to EH */
921 ehi->serror |= serror;
922 ehi->action |= action;
923
924 qc = ata_qc_from_tag(ap, tag);
925 if (qc)
926 qc->err_mask |= err_mask;
927 else
928 ehi->err_mask |= err_mask;
929
930 ata_port_abort(ap);
931}
932
933/*
934 * Function : sata_dwc_isr
935 * arguments : irq, void *dev_instance, struct pt_regs *regs
936 * Return value : irqreturn_t - status of IRQ
937 * This Interrupt handler called via port ops registered function.
938 * .irq_handler = sata_dwc_isr
939 */
940static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
941{
942 struct ata_host *host = (struct ata_host *)dev_instance;
943 struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
944 struct ata_port *ap;
945 struct ata_queued_cmd *qc;
946 unsigned long flags;
947 u8 status, tag;
948 int handled, num_processed, port = 0;
949 uint intpr, sactive, sactive2, tag_mask;
950 struct sata_dwc_device_port *hsdevp;
951 host_pvt.sata_dwc_sactive_issued = 0;
952
953 spin_lock_irqsave(&host->lock, flags);
954
955 /* Read the interrupt register */
956 intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
957
958 ap = host->ports[port];
959 hsdevp = HSDEVP_FROM_AP(ap);
960
961 dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
962 ap->link.active_tag);
963
964 /* Check for error interrupt */
965 if (intpr & SATA_DWC_INTPR_ERR) {
966 sata_dwc_error_intr(ap, hsdev, intpr);
967 handled = 1;
968 goto DONE;
969 }
970
971 /* Check for DMA SETUP FIS (FP DMA) interrupt */
972 if (intpr & SATA_DWC_INTPR_NEWFP) {
973 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
974
975 tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
976 dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
977 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
978 dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
979
980 host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
981
982 qc = ata_qc_from_tag(ap, tag);
983 /*
984 * Start FP DMA for NCQ command. At this point the tag is the
985 * active tag. It is the tag that matches the command about to
986 * be completed.
987 */
988 qc->ap->link.active_tag = tag;
989 sata_dwc_bmdma_start_by_tag(qc, tag);
990
991 handled = 1;
992 goto DONE;
993 }
994 sactive = core_scr_read(SCR_ACTIVE);
995 tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
996
997 /* If no sactive issued and tag_mask is zero then this is not NCQ */
998 if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
999 if (ap->link.active_tag == ATA_TAG_POISON)
1000 tag = 0;
1001 else
1002 tag = ap->link.active_tag;
1003 qc = ata_qc_from_tag(ap, tag);
1004
1005 /* DEV interrupt w/ no active qc? */
1006 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
d578514b
AS
1007 dev_err(ap->dev,
1008 "%s interrupt with no active qc qc=%p\n",
1009 __func__, qc);
62936009
RS
1010 ap->ops->sff_check_status(ap);
1011 handled = 1;
1012 goto DONE;
1013 }
1014 status = ap->ops->sff_check_status(ap);
1015
1016 qc->ap->link.active_tag = tag;
1017 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
1018
1019 if (status & ATA_ERR) {
1020 dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
1021 sata_dwc_qc_complete(ap, qc, 1);
1022 handled = 1;
1023 goto DONE;
1024 }
1025
1026 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
84b47e3b 1027 __func__, get_prot_descript(qc->tf.protocol));
62936009
RS
1028DRVSTILLBUSY:
1029 if (ata_is_dma(qc->tf.protocol)) {
1030 /*
1031 * Each DMA transaction produces 2 interrupts. The DMAC
1032 * transfer complete interrupt and the SATA controller
1033 * operation done interrupt. The command should be
1034 * completed only after both interrupts are seen.
1035 */
1036 host_pvt.dma_interrupt_count++;
1037 if (hsdevp->dma_pending[tag] == \
1038 SATA_DWC_DMA_PENDING_NONE) {
d578514b
AS
1039 dev_err(ap->dev,
1040 "%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
1041 __func__, intpr, status,
62936009
RS
1042 hsdevp->dma_pending[tag]);
1043 }
1044
1045 if ((host_pvt.dma_interrupt_count % 2) == 0)
1046 sata_dwc_dma_xfer_complete(ap, 1);
1047 } else if (ata_is_pio(qc->tf.protocol)) {
1048 ata_sff_hsm_move(ap, qc, status, 0);
1049 handled = 1;
1050 goto DONE;
1051 } else {
1052 if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
1053 goto DRVSTILLBUSY;
1054 }
1055
1056 handled = 1;
1057 goto DONE;
1058 }
1059
1060 /*
1061 * This is a NCQ command. At this point we need to figure out for which
1062 * tags we have gotten a completion interrupt. One interrupt may serve
1063 * as completion for more than one operation when commands are queued
1064 * (NCQ). We need to process each completed command.
1065 */
1066
1067 /* process completed commands */
1068 sactive = core_scr_read(SCR_ACTIVE);
1069 tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
1070
1071 if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \
1072 tag_mask > 1) {
d578514b
AS
1073 dev_dbg(ap->dev,
1074 "%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
1075 __func__, sactive, host_pvt.sata_dwc_sactive_issued,
1076 tag_mask);
62936009
RS
1077 }
1078
1079 if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \
1080 (host_pvt.sata_dwc_sactive_issued)) {
d578514b
AS
1081 dev_warn(ap->dev,
1082 "Bad tag mask? sactive=0x%08x (host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask=0x%08x\n",
1083 sactive, host_pvt.sata_dwc_sactive_issued, tag_mask);
62936009
RS
1084 }
1085
1086 /* read just to clear ... not bad if currently still busy */
1087 status = ap->ops->sff_check_status(ap);
1088 dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);
1089
1090 tag = 0;
1091 num_processed = 0;
1092 while (tag_mask) {
1093 num_processed++;
1094 while (!(tag_mask & 0x00000001)) {
1095 tag++;
1096 tag_mask <<= 1;
1097 }
1098
1099 tag_mask &= (~0x00000001);
1100 qc = ata_qc_from_tag(ap, tag);
1101
1102 /* To be picked up by completion functions */
1103 qc->ap->link.active_tag = tag;
1104 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
1105
1106 /* Let libata/scsi layers handle error */
1107 if (status & ATA_ERR) {
1108 dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
1109 status);
1110 sata_dwc_qc_complete(ap, qc, 1);
1111 handled = 1;
1112 goto DONE;
1113 }
1114
1115 /* Process completed command */
1116 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
84b47e3b 1117 get_prot_descript(qc->tf.protocol));
62936009
RS
1118 if (ata_is_dma(qc->tf.protocol)) {
1119 host_pvt.dma_interrupt_count++;
1120 if (hsdevp->dma_pending[tag] == \
1121 SATA_DWC_DMA_PENDING_NONE)
1122 dev_warn(ap->dev, "%s: DMA not pending?\n",
1123 __func__);
1124 if ((host_pvt.dma_interrupt_count % 2) == 0)
1125 sata_dwc_dma_xfer_complete(ap, 1);
1126 } else {
1127 if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
1128 goto STILLBUSY;
1129 }
1130 continue;
1131
1132STILLBUSY:
1133 ap->stats.idle_irq++;
1134 dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
1135 ap->print_id);
1136 } /* while tag_mask */
1137
1138 /*
1139 * Check to see if any commands completed while we were processing our
1140 * initial set of completed commands (read status clears interrupts,
1141 * so we might miss a completed command interrupt if one came in while
1142 * we were processing --we read status as part of processing a completed
1143 * command).
1144 */
1145 sactive2 = core_scr_read(SCR_ACTIVE);
1146 if (sactive2 != sactive) {
d578514b
AS
1147 dev_dbg(ap->dev,
1148 "More completed - sactive=0x%x sactive2=0x%x\n",
1149 sactive, sactive2);
62936009
RS
1150 }
1151 handled = 1;
1152
1153DONE:
1154 spin_unlock_irqrestore(&host->lock, flags);
1155 return IRQ_RETVAL(handled);
1156}
1157
1158static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
1159{
1160 struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
1161
1162 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
1163 out_le32(&(hsdev->sata_dwc_regs->dmacr),
1164 SATA_DWC_DMACR_RX_CLEAR(
1165 in_le32(&(hsdev->sata_dwc_regs->dmacr))));
1166 } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
1167 out_le32(&(hsdev->sata_dwc_regs->dmacr),
1168 SATA_DWC_DMACR_TX_CLEAR(
1169 in_le32(&(hsdev->sata_dwc_regs->dmacr))));
1170 } else {
1171 /*
1172 * This should not happen, it indicates the driver is out of
1173 * sync. If it does happen, clear dmacr anyway.
1174 */
d578514b
AS
1175 dev_err(host_pvt.dwc_dev,
1176 "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
1177 __func__, tag, hsdevp->dma_pending[tag],
1178 in_le32(&hsdev->sata_dwc_regs->dmacr));
62936009
RS
1179 out_le32(&(hsdev->sata_dwc_regs->dmacr),
1180 SATA_DWC_DMACR_TXRXCH_CLEAR);
1181 }
1182}
1183
1184static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
1185{
1186 struct ata_queued_cmd *qc;
1187 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1188 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1189 u8 tag = 0;
1190
1191 tag = ap->link.active_tag;
1192 qc = ata_qc_from_tag(ap, tag);
1193 if (!qc) {
1194 dev_err(ap->dev, "failed to get qc");
1195 return;
1196 }
1197
1198#ifdef DEBUG_NCQ
1199 if (tag > 0) {
d578514b
AS
1200 dev_info(ap->dev,
1201 "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n",
1202 __func__, qc->tag, qc->tf.command,
84b47e3b
SS
1203 get_dma_dir_descript(qc->dma_dir),
1204 get_prot_descript(qc->tf.protocol),
62936009
RS
1205 in_le32(&(hsdev->sata_dwc_regs->dmacr)));
1206 }
1207#endif
1208
1209 if (ata_is_dma(qc->tf.protocol)) {
1210 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
d578514b
AS
1211 dev_err(ap->dev,
1212 "%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
1213 __func__,
62936009
RS
1214 in_le32(&(hsdev->sata_dwc_regs->dmacr)));
1215 }
1216
1217 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
1218 sata_dwc_qc_complete(ap, qc, check_status);
1219 ap->link.active_tag = ATA_TAG_POISON;
1220 } else {
1221 sata_dwc_qc_complete(ap, qc, check_status);
1222 }
1223}
1224
1225static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
1226 u32 check_status)
1227{
1228 u8 status = 0;
1229 u32 mask = 0x0;
1230 u8 tag = qc->tag;
1231 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1232 host_pvt.sata_dwc_sactive_queued = 0;
1233 dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
1234
1235 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
1236 dev_err(ap->dev, "TX DMA PENDING\n");
1237 else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
1238 dev_err(ap->dev, "RX DMA PENDING\n");
d578514b
AS
1239 dev_dbg(ap->dev,
1240 "QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
1241 qc->tf.command, status, ap->print_id, qc->tf.protocol);
62936009
RS
1242
1243 /* clear active bit */
1244 mask = (~(qcmd_tag_to_mask(tag)));
1245 host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \
1246 & mask;
1247 host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \
1248 & mask;
1249 ata_qc_complete(qc);
1250 return 0;
1251}
1252
1253static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
1254{
1255 /* Enable selective interrupts by setting the interrupt maskregister*/
1256 out_le32(&hsdev->sata_dwc_regs->intmr,
1257 SATA_DWC_INTMR_ERRM |
1258 SATA_DWC_INTMR_NEWFPM |
1259 SATA_DWC_INTMR_PMABRTM |
1260 SATA_DWC_INTMR_DMATM);
1261 /*
1262 * Unmask the error bits that should trigger an error interrupt by
1263 * setting the error mask register.
1264 */
1265 out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
1266
1267 dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
1268 __func__, in_le32(&hsdev->sata_dwc_regs->intmr),
1269 in_le32(&hsdev->sata_dwc_regs->errmr));
1270}
1271
1272static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
1273{
d7c256e8
AS
1274 port->cmd_addr = (void __iomem *)base + 0x00;
1275 port->data_addr = (void __iomem *)base + 0x00;
62936009 1276
d7c256e8
AS
1277 port->error_addr = (void __iomem *)base + 0x04;
1278 port->feature_addr = (void __iomem *)base + 0x04;
62936009 1279
d7c256e8 1280 port->nsect_addr = (void __iomem *)base + 0x08;
62936009 1281
d7c256e8
AS
1282 port->lbal_addr = (void __iomem *)base + 0x0c;
1283 port->lbam_addr = (void __iomem *)base + 0x10;
1284 port->lbah_addr = (void __iomem *)base + 0x14;
62936009 1285
d7c256e8
AS
1286 port->device_addr = (void __iomem *)base + 0x18;
1287 port->command_addr = (void __iomem *)base + 0x1c;
1288 port->status_addr = (void __iomem *)base + 0x1c;
62936009 1289
d7c256e8
AS
1290 port->altstatus_addr = (void __iomem *)base + 0x20;
1291 port->ctl_addr = (void __iomem *)base + 0x20;
62936009
RS
1292}
1293
1294/*
1295 * Function : sata_dwc_port_start
1296 * arguments : struct ata_ioports *port
1297 * Return value : returns 0 if success, error code otherwise
1298 * This function allocates the scatter gather LLI table for AHB DMA
1299 */
1300static int sata_dwc_port_start(struct ata_port *ap)
1301{
1302 int err = 0;
1303 struct sata_dwc_device *hsdev;
1304 struct sata_dwc_device_port *hsdevp = NULL;
1305 struct device *pdev;
1306 int i;
1307
1308 hsdev = HSDEV_FROM_AP(ap);
1309
1310 dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
1311
1312 hsdev->host = ap->host;
1313 pdev = ap->host->dev;
1314 if (!pdev) {
1315 dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
1316 err = -ENODEV;
1317 goto CLEANUP;
1318 }
1319
1320 /* Allocate Port Struct */
1321 hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
1322 if (!hsdevp) {
1323 dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__);
1324 err = -ENOMEM;
1325 goto CLEANUP;
1326 }
1327 hsdevp->hsdev = hsdev;
1328
1329 for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
1330 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
1331
d7c256e8 1332 ap->bmdma_prd = NULL; /* set these so libata doesn't use them */
62936009
RS
1333 ap->bmdma_prd_dma = 0;
1334
1335 /*
1336 * DMA - Assign scatter gather LLI table. We can't use the libata
1337 * version since it's PRD is IDE PCI specific.
1338 */
1339 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
1340 hsdevp->llit[i] = dma_alloc_coherent(pdev,
1341 SATA_DWC_DMAC_LLI_TBL_SZ,
1342 &(hsdevp->llit_dma[i]),
1343 GFP_ATOMIC);
1344 if (!hsdevp->llit[i]) {
1345 dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
1346 __func__);
1347 err = -ENOMEM;
a081da63 1348 goto CLEANUP_ALLOC;
62936009
RS
1349 }
1350 }
1351
1352 if (ap->port_no == 0) {
1353 dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
1354 __func__);
1355 out_le32(&hsdev->sata_dwc_regs->dmacr,
1356 SATA_DWC_DMACR_TXRXCH_CLEAR);
1357
1358 dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
1359 __func__);
1360 out_le32(&hsdev->sata_dwc_regs->dbtsr,
1361 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
1362 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
1363 }
1364
1365 /* Clear any error bits before libata starts issuing commands */
1366 clear_serror();
1367 ap->private_data = hsdevp;
a081da63
JL
1368 dev_dbg(ap->dev, "%s: done\n", __func__);
1369 return 0;
62936009 1370
a081da63
JL
1371CLEANUP_ALLOC:
1372 kfree(hsdevp);
62936009 1373CLEANUP:
a081da63 1374 dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
62936009
RS
1375 return err;
1376}
1377
1378static void sata_dwc_port_stop(struct ata_port *ap)
1379{
1380 int i;
1381 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1382 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1383
1384 dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
1385
1386 if (hsdevp && hsdev) {
1387 /* deallocate LLI table */
1388 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
1389 dma_free_coherent(ap->host->dev,
1390 SATA_DWC_DMAC_LLI_TBL_SZ,
1391 hsdevp->llit[i], hsdevp->llit_dma[i]);
1392 }
1393
1394 kfree(hsdevp);
1395 }
1396 ap->private_data = NULL;
1397}
1398
1399/*
1400 * Function : sata_dwc_exec_command_by_tag
1401 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
1402 * Return value : None
1403 * This function keeps track of individual command tag ids and calls
1404 * ata_exec_command in libata
1405 */
1406static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
1407 struct ata_taskfile *tf,
1408 u8 tag, u32 cmd_issued)
1409{
1410 unsigned long flags;
1411 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1412
1413 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
c211962d 1414 ata_get_cmd_descript(tf->command), tag);
62936009
RS
1415
1416 spin_lock_irqsave(&ap->host->lock, flags);
1417 hsdevp->cmd_issued[tag] = cmd_issued;
1418 spin_unlock_irqrestore(&ap->host->lock, flags);
1419 /*
1420 * Clear SError before executing a new command.
1421 * sata_dwc_scr_write and read can not be used here. Clearing the PM
1422 * managed SError register for the disk needs to be done before the
1423 * task file is loaded.
1424 */
1425 clear_serror();
1426 ata_sff_exec_command(ap, tf);
1427}
1428
1429static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
1430{
1431 sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
1432 SATA_DWC_CMD_ISSUED_PEND);
1433}
1434
1435static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
1436{
1437 u8 tag = qc->tag;
1438
1439 if (ata_is_ncq(qc->tf.protocol)) {
1440 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
1441 __func__, qc->ap->link.sactive, tag);
1442 } else {
1443 tag = 0;
1444 }
1445 sata_dwc_bmdma_setup_by_tag(qc, tag);
1446}
1447
1448static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
1449{
1450 int start_dma;
1451 u32 reg, dma_chan;
1452 struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
1453 struct ata_port *ap = qc->ap;
1454 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1455 int dir = qc->dma_dir;
1456 dma_chan = hsdevp->dma_chan[tag];
1457
1458 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
1459 start_dma = 1;
1460 if (dir == DMA_TO_DEVICE)
1461 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
1462 else
1463 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
1464 } else {
d578514b
AS
1465 dev_err(ap->dev,
1466 "%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
1467 __func__, hsdevp->cmd_issued[tag], tag);
62936009
RS
1468 start_dma = 0;
1469 }
1470
d578514b
AS
1471 dev_dbg(ap->dev,
1472 "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n",
1473 __func__, qc, tag, qc->tf.command,
84b47e3b 1474 get_dma_dir_descript(qc->dma_dir), start_dma);
62936009
RS
1475 sata_dwc_tf_dump(&(qc->tf));
1476
1477 if (start_dma) {
1478 reg = core_scr_read(SCR_ERROR);
1479 if (reg & SATA_DWC_SERROR_ERR_BITS) {
1480 dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
1481 __func__, reg);
1482 }
1483
1484 if (dir == DMA_TO_DEVICE)
1485 out_le32(&hsdev->sata_dwc_regs->dmacr,
1486 SATA_DWC_DMACR_TXCHEN);
1487 else
1488 out_le32(&hsdev->sata_dwc_regs->dmacr,
1489 SATA_DWC_DMACR_RXCHEN);
1490
1491 /* Enable AHB DMA transfer on the specified channel */
1492 dma_dwc_xfer_start(dma_chan);
1493 }
1494}
1495
1496static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
1497{
1498 u8 tag = qc->tag;
1499
1500 if (ata_is_ncq(qc->tf.protocol)) {
1501 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
1502 __func__, qc->ap->link.sactive, tag);
1503 } else {
1504 tag = 0;
1505 }
1506 dev_dbg(qc->ap->dev, "%s\n", __func__);
1507 sata_dwc_bmdma_start_by_tag(qc, tag);
1508}
1509
1510/*
1511 * Function : sata_dwc_qc_prep_by_tag
1512 * arguments : ata_queued_cmd *qc, u8 tag
1513 * Return value : None
1514 * qc_prep for a particular queued command based on tag
1515 */
1516static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
1517{
1518 struct scatterlist *sg = qc->sg;
1519 struct ata_port *ap = qc->ap;
d26377b8 1520 int dma_chan;
62936009
RS
1521 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1522 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
62936009
RS
1523
1524 dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
84b47e3b 1525 __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
62936009
RS
1526 qc->n_elem);
1527
1528 dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
1529 hsdevp->llit_dma[tag],
d7c256e8
AS
1530 (void __iomem *)&hsdev->sata_dwc_regs->dmadr,
1531 qc->dma_dir);
62936009
RS
1532 if (dma_chan < 0) {
1533 dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
c211962d 1534 __func__, dma_chan);
62936009
RS
1535 return;
1536 }
1537 hsdevp->dma_chan[tag] = dma_chan;
1538}
1539
1540static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
1541{
1542 u32 sactive;
1543 u8 tag = qc->tag;
1544 struct ata_port *ap = qc->ap;
1545
1546#ifdef DEBUG_NCQ
1547 if (qc->tag > 0 || ap->link.sactive > 1)
d578514b
AS
1548 dev_info(ap->dev,
1549 "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
62936009 1550 __func__, ap->print_id, qc->tf.command,
c211962d 1551 ata_get_cmd_descript(qc->tf.command),
84b47e3b 1552 qc->tag, get_prot_descript(qc->tf.protocol),
62936009
RS
1553 ap->link.active_tag, ap->link.sactive);
1554#endif
1555
1556 if (!ata_is_ncq(qc->tf.protocol))
1557 tag = 0;
1558 sata_dwc_qc_prep_by_tag(qc, tag);
1559
1560 if (ata_is_ncq(qc->tf.protocol)) {
1561 sactive = core_scr_read(SCR_ACTIVE);
1562 sactive |= (0x00000001 << tag);
1563 core_scr_write(SCR_ACTIVE, sactive);
1564
d578514b
AS
1565 dev_dbg(qc->ap->dev,
1566 "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
1567 __func__, tag, qc->ap->link.sactive, sactive);
62936009
RS
1568
1569 ap->ops->sff_tf_load(ap, &qc->tf);
1570 sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
1571 SATA_DWC_CMD_ISSUED_PEND);
1572 } else {
1573 ata_sff_qc_issue(qc);
1574 }
1575 return 0;
1576}
1577
1578/*
1579 * Function : sata_dwc_qc_prep
1580 * arguments : ata_queued_cmd *qc
1581 * Return value : None
1582 * qc_prep for a particular queued command
1583 */
1584
1585static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
1586{
1587 if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
1588 return;
1589
1590#ifdef DEBUG_NCQ
1591 if (qc->tag > 0)
1592 dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
c211962d 1593 __func__, qc->tag, qc->ap->link.active_tag);
62936009
RS
1594
1595 return ;
1596#endif
1597}
1598
1599static void sata_dwc_error_handler(struct ata_port *ap)
1600{
62936009
RS
1601 ata_sff_error_handler(ap);
1602}
1603
d7c256e8
AS
1604static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
1605 unsigned long deadline)
3a8b788f
TN
1606{
1607 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
1608 int ret;
1609
1610 ret = sata_sff_hardreset(link, class, deadline);
1611
1612 sata_dwc_enable_interrupts(hsdev);
1613
1614 /* Reconfigure the DMA control register */
1615 out_le32(&hsdev->sata_dwc_regs->dmacr,
1616 SATA_DWC_DMACR_TXRXCH_CLEAR);
1617
1618 /* Reconfigure the DMA Burst Transaction Size register */
1619 out_le32(&hsdev->sata_dwc_regs->dbtsr,
1620 SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
1621 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
1622
1623 return ret;
1624}
1625
62936009
RS
1626/*
1627 * scsi mid-layer and libata interface structures
1628 */
1629static struct scsi_host_template sata_dwc_sht = {
1630 ATA_NCQ_SHT(DRV_NAME),
1631 /*
1632 * test-only: Currently this driver doesn't handle NCQ
1633 * correctly. We enable NCQ but set the queue depth to a
1634 * max of 1. This will get fixed in in a future release.
1635 */
1636 .sg_tablesize = LIBATA_MAX_PRD,
d7c256e8 1637 /* .can_queue = ATA_MAX_QUEUE, */
62936009
RS
1638 .dma_boundary = ATA_DMA_BOUNDARY,
1639};
1640
1641static struct ata_port_operations sata_dwc_ops = {
1642 .inherits = &ata_sff_port_ops,
1643
1644 .error_handler = sata_dwc_error_handler,
3a8b788f 1645 .hardreset = sata_dwc_hardreset,
62936009
RS
1646
1647 .qc_prep = sata_dwc_qc_prep,
1648 .qc_issue = sata_dwc_qc_issue,
1649
1650 .scr_read = sata_dwc_scr_read,
1651 .scr_write = sata_dwc_scr_write,
1652
1653 .port_start = sata_dwc_port_start,
1654 .port_stop = sata_dwc_port_stop,
1655
1656 .bmdma_setup = sata_dwc_bmdma_setup,
1657 .bmdma_start = sata_dwc_bmdma_start,
1658};
1659
1660static const struct ata_port_info sata_dwc_port_info[] = {
1661 {
9cbe056f 1662 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
b83a4c39 1663 .pio_mask = ATA_PIO4,
62936009
RS
1664 .udma_mask = ATA_UDMA6,
1665 .port_ops = &sata_dwc_ops,
1666 },
1667};
1668
1c48a5c9 1669static int sata_dwc_probe(struct platform_device *ofdev)
62936009
RS
1670{
1671 struct sata_dwc_device *hsdev;
1672 u32 idr, versionr;
1673 char *ver = (char *)&versionr;
d7c256e8 1674 u8 __iomem *base;
62936009 1675 int err = 0;
4aaa7187 1676 int irq;
62936009
RS
1677 struct ata_host *host;
1678 struct ata_port_info pi = sata_dwc_port_info[0];
1679 const struct ata_port_info *ppi[] = { &pi, NULL };
dc7f71f4
TN
1680 struct device_node *np = ofdev->dev.of_node;
1681 u32 dma_chan;
62936009
RS
1682
1683 /* Allocate DWC SATA device */
d537fc0c
AS
1684 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
1685 hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL);
1686 if (!host || !hsdev)
c592b74f 1687 return -ENOMEM;
62936009 1688
d537fc0c
AS
1689 host->private_data = hsdev;
1690
dc7f71f4
TN
1691 if (of_property_read_u32(np, "dma-channel", &dma_chan)) {
1692 dev_warn(&ofdev->dev, "no dma-channel property set."
1693 " Use channel 0\n");
1694 dma_chan = 0;
1695 }
1696 host_pvt.dma_channel = dma_chan;
1697
62936009 1698 /* Ioremap SATA registers */
9037908f 1699 base = of_iomap(np, 0);
62936009 1700 if (!base) {
d578514b
AS
1701 dev_err(&ofdev->dev,
1702 "ioremap failed for SATA register address\n");
d537fc0c 1703 return -ENODEV;
62936009
RS
1704 }
1705 hsdev->reg_base = base;
1706 dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
1707
1708 /* Synopsys DWC SATA specific Registers */
1709 hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
1710
62936009
RS
1711 /* Setup port */
1712 host->ports[0]->ioaddr.cmd_addr = base;
1713 host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
1714 host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
1715 sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
1716
1717 /* Read the ID and Version Registers */
1718 idr = in_le32(&hsdev->sata_dwc_regs->idr);
1719 versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
1720 dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
1721 idr, ver[0], ver[1], ver[2]);
1722
1723 /* Get SATA DMA interrupt number */
9037908f 1724 irq = irq_of_parse_and_map(np, 1);
62936009
RS
1725 if (irq == NO_IRQ) {
1726 dev_err(&ofdev->dev, "no SATA DMA irq\n");
1727 err = -ENODEV;
4aaa7187 1728 goto error_iomap;
62936009
RS
1729 }
1730
1731 /* Get physical SATA DMA register base address */
9037908f 1732 host_pvt.sata_dma_regs = (void *)of_iomap(np, 1);
62936009 1733 if (!(host_pvt.sata_dma_regs)) {
d578514b
AS
1734 dev_err(&ofdev->dev,
1735 "ioremap failed for AHBDMA register address\n");
62936009 1736 err = -ENODEV;
4aaa7187 1737 goto error_iomap;
62936009
RS
1738 }
1739
1740 /* Save dev for later use in dev_xxx() routines */
1741 host_pvt.dwc_dev = &ofdev->dev;
1742
1743 /* Initialize AHB DMAC */
4aaa7187
AS
1744 err = dma_dwc_init(hsdev, irq);
1745 if (err)
1746 goto error_dma_iomap;
62936009
RS
1747
1748 /* Enable SATA Interrupts */
1749 sata_dwc_enable_interrupts(hsdev);
1750
1751 /* Get SATA interrupt number */
9037908f 1752 irq = irq_of_parse_and_map(np, 0);
62936009
RS
1753 if (irq == NO_IRQ) {
1754 dev_err(&ofdev->dev, "no SATA DMA irq\n");
1755 err = -ENODEV;
1756 goto error_out;
1757 }
1758
1759 /*
1760 * Now, register with libATA core, this will also initiate the
1761 * device discovery process, invoking our port_start() handler &
1762 * error_handler() to execute a dummy Softreset EH session
1763 */
4aaa7187
AS
1764 err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1765 if (err)
62936009
RS
1766 dev_err(&ofdev->dev, "failed to activate host");
1767
1768 dev_set_drvdata(&ofdev->dev, host);
1769 return 0;
1770
1771error_out:
1772 /* Free SATA DMA resources */
1773 dma_dwc_exit(hsdev);
4aaa7187
AS
1774error_dma_iomap:
1775 iounmap((void __iomem *)host_pvt.sata_dma_regs);
04e506b5
VK
1776error_iomap:
1777 iounmap(base);
62936009
RS
1778 return err;
1779}
1780
60652d07 1781static int sata_dwc_remove(struct platform_device *ofdev)
62936009
RS
1782{
1783 struct device *dev = &ofdev->dev;
1784 struct ata_host *host = dev_get_drvdata(dev);
1785 struct sata_dwc_device *hsdev = host->private_data;
1786
1787 ata_host_detach(host);
62936009
RS
1788
1789 /* Free SATA DMA resources */
1790 dma_dwc_exit(hsdev);
1791
4aaa7187 1792 iounmap((void __iomem *)host_pvt.sata_dma_regs);
62936009 1793 iounmap(hsdev->reg_base);
62936009
RS
1794 dev_dbg(&ofdev->dev, "done\n");
1795 return 0;
1796}
1797
1798static const struct of_device_id sata_dwc_match[] = {
1799 { .compatible = "amcc,sata-460ex", },
1800 {}
1801};
1802MODULE_DEVICE_TABLE(of, sata_dwc_match);
1803
1c48a5c9 1804static struct platform_driver sata_dwc_driver = {
62936009
RS
1805 .driver = {
1806 .name = DRV_NAME,
62936009
RS
1807 .of_match_table = sata_dwc_match,
1808 },
1809 .probe = sata_dwc_probe,
1810 .remove = sata_dwc_remove,
1811};
1812
99c8ea3e 1813module_platform_driver(sata_dwc_driver);
62936009
RS
1814
1815MODULE_LICENSE("GPL");
1816MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
d578514b 1817MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
62936009 1818MODULE_VERSION(DRV_VERSION);