drivers/ata/sata_highbank.c
1 /*
2 * Calxeda Highbank AHCI SATA platform driver
3 * Copyright 2012 Calxeda, Inc.
4 *
5 * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <linux/kernel.h>
20 #include <linux/gfp.h>
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/types.h>
24 #include <linux/err.h>
25 #include <linux/io.h>
26 #include <linux/spinlock.h>
27 #include <linux/device.h>
28 #include <linux/of_device.h>
29 #include <linux/of_address.h>
30 #include <linux/platform_device.h>
31 #include <linux/libata.h>
32 #include <linux/ahci_platform.h>
33 #include <linux/interrupt.h>
34 #include <linux/delay.h>
35 #include <linux/export.h>
36 #include <linux/gpio.h>
37 #include <linux/of_gpio.h>
38
39 #include "ahci.h"
40
41 #define CPHY_MAP(dev, addr) ((((dev) & 0x1f) << 7) | (((addr) >> 9) & 0x7f))
42 #define CPHY_ADDR(addr) (((addr) & 0x1ff) << 2)
43 #define SERDES_CR_CTL 0x80a0
44 #define SERDES_CR_ADDR 0x80a1
45 #define SERDES_CR_DATA 0x80a2
46 #define CR_BUSY 0x0001
47 #define CR_START 0x0001
48 #define CR_WR_RDN 0x0002
49 #define CPHY_TX_INPUT_STS 0x2001
50 #define CPHY_RX_INPUT_STS 0x2002
51 #define CPHY_SATA_TX_OVERRIDE 0x8000
52 #define CPHY_SATA_RX_OVERRIDE 0x4000
53 #define CPHY_TX_OVERRIDE 0x2004
54 #define CPHY_RX_OVERRIDE 0x2005
55 #define SPHY_LANE 0x100
56 #define SPHY_HALF_RATE 0x0001
57 #define CPHY_SATA_DPLL_MODE 0x0700
58 #define CPHY_SATA_DPLL_SHIFT 8
59 #define CPHY_SATA_DPLL_RESET (1 << 11)
60 #define CPHY_SATA_TX_ATTEN 0x1c00
61 #define CPHY_SATA_TX_ATTEN_SHIFT 10
62 #define CPHY_PHY_COUNT 6
63 #define CPHY_LANE_COUNT 4
64 #define CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT)
65
66 static DEFINE_SPINLOCK(cphy_lock);
67 /* Each of the 6 phys can have up to 4 sata ports attached to it. Map 0-based
68 * sata ports to their phys and then to their lanes within the phys
69 */
70 struct phy_lane_info {
71 void __iomem *phy_base;
72 u8 lane_mapping;
73 u8 phy_devs;
74 u8 tx_atten;
75 };
76 static struct phy_lane_info port_data[CPHY_PORT_COUNT];
77
78 static DEFINE_SPINLOCK(sgpio_lock);
79 #define SCLOCK 0
80 #define SLOAD 1
81 #define SDATA 2
82 #define SGPIO_PINS 3
83 #define SGPIO_PORTS 8
84
85 struct ecx_plat_data {
86 u32 n_ports;
87 /* number of extra clocks that the SGPIO PIC controller expects */
88 u32 pre_clocks;
89 u32 post_clocks;
90 unsigned sgpio_gpio[SGPIO_PINS];
91 u32 sgpio_pattern;
92 u32 port_to_sgpio[SGPIO_PORTS];
93 };
94
95 #define SGPIO_SIGNALS 3
96 #define ECX_ACTIVITY_BITS 0x300000
97 #define ECX_ACTIVITY_SHIFT 0
98 #define ECX_LOCATE_BITS 0x80000
99 #define ECX_LOCATE_SHIFT 1
100 #define ECX_FAULT_BITS 0x400000
101 #define ECX_FAULT_SHIFT 2
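/* Each port owns three consecutive bits in the SGPIO pattern; return the bit
 * for the requested signal (activity/locate/fault) of the given port.
 */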
102 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
103 u32 shift)
104 {
105 return 1 << (3 * pdata->port_to_sgpio[port] + shift);
106 }
107
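/* Fold the LED message state for one port into the cached SGPIO pattern */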
108 static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
109 {
110 if (state & ECX_ACTIVITY_BITS)
111 pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
112 ECX_ACTIVITY_SHIFT);
113 else
114 pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
115 ECX_ACTIVITY_SHIFT);
116 if (state & ECX_LOCATE_BITS)
117 pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
118 ECX_LOCATE_SHIFT);
119 else
120 pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
121 ECX_LOCATE_SHIFT);
122 if (state & ECX_FAULT_BITS)
123 pdata->sgpio_pattern |= sgpio_bit_shift(pdata, port,
124 ECX_FAULT_SHIFT);
125 else
126 pdata->sgpio_pattern &= ~sgpio_bit_shift(pdata, port,
127 ECX_FAULT_SHIFT);
128 }
129
130 /*
131 * Tell the LED controller that the signal has changed by raising the clock
132 * line for 50 us and then lowering it for 50 us.
133 */
134 static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
135 {
136 gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
137 udelay(50);
138 gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
139 udelay(50);
140 }
141
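/*
 * Update the SGPIO pattern for this port and bit-bang the full pattern out to
 * the LED controller over the SGPIO clock, load and data lines.
 */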
142 static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
143 ssize_t size)
144 {
145 struct ahci_host_priv *hpriv = ap->host->private_data;
146 struct ecx_plat_data *pdata = (struct ecx_plat_data *) hpriv->plat_data;
147 struct ahci_port_priv *pp = ap->private_data;
148 unsigned long flags;
149 int pmp, i;
150 struct ahci_em_priv *emp;
151 u32 sgpio_out;
152
153 /* get the slot number from the message */
154 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
155 if (pmp < EM_MAX_SLOTS)
156 emp = &pp->em_priv[pmp];
157 else
158 return -EINVAL;
159
160 if (!(hpriv->em_msg_type & EM_MSG_TYPE_LED))
161 return size;
162
163 spin_lock_irqsave(&sgpio_lock, flags);
164 ecx_parse_sgpio(pdata, ap->port_no, state);
165 sgpio_out = pdata->sgpio_pattern;
166 for (i = 0; i < pdata->pre_clocks; i++)
167 ecx_led_cycle_clock(pdata);
168
169 gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
170 ecx_led_cycle_clock(pdata);
171 gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);
172 /*
173 * bit-bang out the SGPIO pattern, by consuming a bit and then
174 * clocking it out.
175 */
176 for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
177 gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
178 sgpio_out >>= 1;
179 ecx_led_cycle_clock(pdata);
180 }
181 for (i = 0; i < pdata->post_clocks; i++)
182 ecx_led_cycle_clock(pdata);
183
184 /* save off new led state for port/slot */
185 emp->led_state = state;
186
187 spin_unlock_irqrestore(&sgpio_lock, flags);
188 return size;
189 }
190
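/*
 * Claim the SGPIO lines described in the device tree and enable LED-based
 * enclosure management messages for the host.
 */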
191 static void highbank_set_em_messages(struct device *dev,
192 struct ahci_host_priv *hpriv,
193 struct ata_port_info *pi)
194 {
195 struct device_node *np = dev->of_node;
196 struct ecx_plat_data *pdata = hpriv->plat_data;
197 int i;
198 int err;
199
200 for (i = 0; i < SGPIO_PINS; i++) {
201 err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
202 if (IS_ERR_VALUE(err))
203 return;
204
205 pdata->sgpio_gpio[i] = err;
206 err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
207 if (err) {
208 pr_err("sata_highbank gpio_request %d failed: %d\n",
209 i, err);
210 return;
211 }
212 gpio_direction_output(pdata->sgpio_gpio[i], 1);
213 }
214 of_property_read_u32_array(np, "calxeda,led-order",
215 pdata->port_to_sgpio,
216 pdata->n_ports);
217 if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
218 pdata->pre_clocks = 0;
219 if (of_property_read_u32(np, "calxeda,post-clocks",
220 &pdata->post_clocks))
221 pdata->post_clocks = 0;
222
223 /* store em_loc */
224 hpriv->em_loc = 0;
225 hpriv->em_buf_sz = 4;
226 hpriv->em_msg_type = EM_MSG_TYPE_LED;
227 pi->flags |= ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY;
228 }
229
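/*
 * Raw combo PHY register access: the map register at offset 0x800 selects the
 * PHY device and the upper address bits; the register itself is then accessed
 * through the window at CPHY_ADDR(addr).
 */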
230 static u32 __combo_phy_reg_read(u8 sata_port, u32 addr)
231 {
232 u32 data;
233 u8 dev = port_data[sata_port].phy_devs;
234 spin_lock(&cphy_lock);
235 writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
236 data = readl(port_data[sata_port].phy_base + CPHY_ADDR(addr));
237 spin_unlock(&cphy_lock);
238 return data;
239 }
240
241 static void __combo_phy_reg_write(u8 sata_port, u32 addr, u32 data)
242 {
243 u8 dev = port_data[sata_port].phy_devs;
244 spin_lock(&cphy_lock);
245 writel(CPHY_MAP(dev, addr), port_data[sata_port].phy_base + 0x800);
246 writel(data, port_data[sata_port].phy_base + CPHY_ADDR(addr));
247 spin_unlock(&cphy_lock);
248 }
249
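/*
 * The SERDES control registers are accessed indirectly: load the target
 * address into SERDES_CR_ADDR, start the transaction via SERDES_CR_CTL and
 * poll CR_BUSY until the PHY has completed it.
 */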
250 static void combo_phy_wait_for_ready(u8 sata_port)
251 {
252 while (__combo_phy_reg_read(sata_port, SERDES_CR_CTL) & CR_BUSY)
253 udelay(5);
254 }
255
256 static u32 combo_phy_read(u8 sata_port, u32 addr)
257 {
258 combo_phy_wait_for_ready(sata_port);
259 __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
260 __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_START);
261 combo_phy_wait_for_ready(sata_port);
262 return __combo_phy_reg_read(sata_port, SERDES_CR_DATA);
263 }
264
265 static void combo_phy_write(u8 sata_port, u32 addr, u32 data)
266 {
267 combo_phy_wait_for_ready(sata_port);
268 __combo_phy_reg_write(sata_port, SERDES_CR_ADDR, addr);
269 __combo_phy_reg_write(sata_port, SERDES_CR_DATA, data);
270 __combo_phy_reg_write(sata_port, SERDES_CR_CTL, CR_WR_RDN | CR_START);
271 }
272
273 static void highbank_cphy_disable_overrides(u8 sata_port)
274 {
275 u8 lane = port_data[sata_port].lane_mapping;
276 u32 tmp;
277 if (unlikely(port_data[sata_port].phy_base == NULL))
278 return;
279 tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
280 tmp &= ~CPHY_SATA_RX_OVERRIDE;
281 combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
282 }
283
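/*
 * Override the TX attenuation for this port's lane. A value with bit 3 set
 * leaves the lane at its default setting.
 */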
284 static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
285 {
286 u8 lane = port_data[sata_port].lane_mapping;
287 u32 tmp;
288
289 if (val & 0x8)
290 return;
291
292 tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
293 tmp &= ~CPHY_SATA_TX_OVERRIDE;
294 combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
295
296 tmp |= CPHY_SATA_TX_OVERRIDE;
297 combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
298
299 tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
300 combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
301 }
302
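/*
 * Override the RX DPLL mode for this port's lane, pulsing the DPLL reset bit
 * so the new mode takes effect.
 */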
303 static void cphy_override_rx_mode(u8 sata_port, u32 val)
304 {
305 u8 lane = port_data[sata_port].lane_mapping;
306 u32 tmp;
307 tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
308 tmp &= ~CPHY_SATA_RX_OVERRIDE;
309 combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
310
311 tmp |= CPHY_SATA_RX_OVERRIDE;
312 combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
313
314 tmp &= ~CPHY_SATA_DPLL_MODE;
315 tmp |= val << CPHY_SATA_DPLL_SHIFT;
316 combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
317
318 tmp |= CPHY_SATA_DPLL_RESET;
319 combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
320
321 tmp &= ~CPHY_SATA_DPLL_RESET;
322 combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
323
324 msleep(15);
325 }
326
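/*
 * Wait (bounded) for the lane to drop out of half-rate mode, then override
 * the RX DPLL mode and apply the per-port TX attenuation.
 */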
327 static void highbank_cphy_override_lane(u8 sata_port)
328 {
329 u8 lane = port_data[sata_port].lane_mapping;
330 u32 tmp, k = 0;
331
332 if (unlikely(port_data[sata_port].phy_base == NULL))
333 return;
334 do {
335 tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS +
336 lane * SPHY_LANE);
337 } while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
338 cphy_override_rx_mode(sata_port, 3);
339 cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
340 }
341
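/*
 * Walk the "calxeda,port-phys" phandles to map each SATA port to its combo
 * PHY, lane and TX attenuation setting.
 */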
342 static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
343 {
344 struct device_node *sata_node = dev->of_node;
345 int phy_count = 0, phy, port = 0, i;
346 void __iomem *cphy_base[CPHY_PHY_COUNT] = {};
347 struct device_node *phy_nodes[CPHY_PHY_COUNT] = {};
348 u32 tx_atten[CPHY_PORT_COUNT] = {};
349
350 memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
351
352 do {
353 u32 tmp;
354 struct of_phandle_args phy_data;
355 if (of_parse_phandle_with_args(sata_node,
356 "calxeda,port-phys", "#phy-cells",
357 port, &phy_data))
358 break;
359 for (phy = 0; phy < phy_count; phy++) {
360 if (phy_nodes[phy] == phy_data.np)
361 break;
362 }
363 if (phy_nodes[phy] == NULL) {
364 phy_nodes[phy] = phy_data.np;
365 cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
366 if (cphy_base[phy] == NULL) {
367 return 0;
368 }
369 phy_count += 1;
370 }
371 port_data[port].lane_mapping = phy_data.args[0];
372 of_property_read_u32(phy_nodes[phy], "phydev", &tmp);
373 port_data[port].phy_devs = tmp;
374 port_data[port].phy_base = cphy_base[phy];
375 of_node_put(phy_data.np);
376 port += 1;
377 } while (port < CPHY_PORT_COUNT);
378 of_property_read_u32_array(sata_node, "calxeda,tx-atten",
379 tx_atten, port);
380 for (i = 0; i < port; i++)
381 port_data[i].tx_atten = (u8) tx_atten[i];
382 return 0;
383 }
384
385 /*
386 * The Calxeda SATA phy intermittently fails to bring up a link with Gen3 drives.
387 * Retrying the phy hard reset can work around the issue, but the drive
388 * may fail again. In less than 150 out of 15000 test runs, it took more
389 * than 10 tries for the link to be established (but never more than 35).
390 * Triple the maximum observed retry count to provide plenty of margin for
391 * rare events and to guarantee that the link is established.
392 *
393 * Also, the default 2 second time-out on a failed drive is too long in
394 * this situation. The uboot implementation of the same driver function
395 * uses a much shorter time-out period and never experiences a time out
396 * issue. Reducing the time-out to 500ms improves the responsiveness.
397 * The other timing constants were kept the same as the stock AHCI driver.
398 * This change was also tested 15000 times on 24 drives and none of them
399 * experienced a time out.
400 */
401 static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
402 unsigned long deadline)
403 {
404 static const unsigned long timing[] = { 5, 100, 500};
405 struct ata_port *ap = link->ap;
406 struct ahci_port_priv *pp = ap->private_data;
407 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
408 struct ata_taskfile tf;
409 bool online;
410 u32 sstatus;
411 int rc;
412 int retry = 100;
413
414 ahci_stop_engine(ap);
415
416 /* clear D2H reception area to properly wait for D2H FIS */
417 ata_tf_init(link->device, &tf);
418 tf.command = ATA_BUSY;
419 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
420
421 do {
422 highbank_cphy_disable_overrides(link->ap->port_no);
423 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
424 highbank_cphy_override_lane(link->ap->port_no);
425
426 /* If SStatus reports a device is present (DET = 1) but the link
427 * did not come up, retry the hard reset.
428 */
429 if (sata_scr_read(link, SCR_STATUS, &sstatus))
430 break;
431 if (!(sstatus & 0x3))
432 break;
433 } while (!online && retry--);
434
435 ahci_start_engine(ap);
436
437 if (online)
438 *class = ahci_dev_classify(ap);
439
440 return rc;
441 }
442
443 static struct ata_port_operations ahci_highbank_ops = {
444 .inherits = &ahci_ops,
445 .hardreset = ahci_highbank_hardreset,
446 .transmit_led_message = ecx_transmit_led_message,
447 };
448
449 static const struct ata_port_info ahci_highbank_port_info = {
450 .flags = AHCI_FLAG_COMMON,
451 .pio_mask = ATA_PIO4,
452 .udma_mask = ATA_UDMA6,
453 .port_ops = &ahci_highbank_ops,
454 };
455
456 static struct scsi_host_template ahci_highbank_platform_sht = {
457 AHCI_SHT("sata_highbank"),
458 };
459
460 static const struct of_device_id ahci_of_match[] = {
461 { .compatible = "calxeda,hb-ahci" },
462 {},
463 };
464 MODULE_DEVICE_TABLE(of, ahci_of_match);
465
466 static int ahci_highbank_probe(struct platform_device *pdev)
467 {
468 struct device *dev = &pdev->dev;
469 struct ahci_host_priv *hpriv;
470 struct ecx_plat_data *pdata;
471 struct ata_host *host;
472 struct resource *mem;
473 int irq;
474 int i;
475 int rc;
476 u32 n_ports;
477 struct ata_port_info pi = ahci_highbank_port_info;
478 const struct ata_port_info *ppi[] = { &pi, NULL };
479
480 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
481 if (!mem) {
482 dev_err(dev, "no mmio space\n");
483 return -EINVAL;
484 }
485
486 irq = platform_get_irq(pdev, 0);
487 if (irq <= 0) {
488 dev_err(dev, "no irq\n");
489 return -EINVAL;
490 }
491
492 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
493 if (!hpriv) {
494 dev_err(dev, "can't alloc ahci_host_priv\n");
495 return -ENOMEM;
496 }
497 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
498 if (!pdata) {
499 dev_err(dev, "can't alloc ecx_plat_data\n");
500 return -ENOMEM;
501 }
502
503 hpriv->flags |= (unsigned long)pi.private_data;
504
505 hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
506 if (!hpriv->mmio) {
507 dev_err(dev, "can't map %pR\n", mem);
508 return -ENOMEM;
509 }
510
511 rc = highbank_initialize_phys(dev, hpriv->mmio);
512 if (rc)
513 return rc;
514
515
516 ahci_save_initial_config(dev, hpriv, 0, 0);
517
518 /* prepare host */
519 if (hpriv->cap & HOST_CAP_NCQ)
520 pi.flags |= ATA_FLAG_NCQ;
521
522 if (hpriv->cap & HOST_CAP_PMP)
523 pi.flags |= ATA_FLAG_PMP;
524
525 if (hpriv->cap & HOST_CAP_64)
526 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
527
528 /* CAP.NP sometimes indicates the index of the last enabled
529 * port, at other times, that of the last possible port, so
530 * determining the maximum port number requires looking at
531 * both CAP.NP and port_map.
532 */
533 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
534
535 pdata->n_ports = n_ports;
536 hpriv->plat_data = pdata;
537 highbank_set_em_messages(dev, hpriv, &pi);
538
539 host = ata_host_alloc_pinfo(dev, ppi, n_ports);
540 if (!host) {
541 rc = -ENOMEM;
542 goto err0;
543 }
544
545 host->private_data = hpriv;
546
547 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
548 host->flags |= ATA_HOST_PARALLEL_SCAN;
549
550 for (i = 0; i < host->n_ports; i++) {
551 struct ata_port *ap = host->ports[i];
552
553 ata_port_desc(ap, "mmio %pR", mem);
554 ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
555
556 /* set enclosure management message type */
557 if (ap->flags & ATA_FLAG_EM)
558 ap->em_message_type = hpriv->em_msg_type;
559
560 /* disabled/not-implemented port */
561 if (!(hpriv->port_map & (1 << i)))
562 ap->ops = &ata_dummy_port_ops;
563 }
564
565 rc = ahci_reset_controller(host);
566 if (rc)
567 goto err0;
568
569 ahci_init_controller(host);
570 ahci_print_info(host, "platform");
571
572 rc = ata_host_activate(host, irq, ahci_interrupt, 0,
573 &ahci_highbank_platform_sht);
574 if (rc)
575 goto err0;
576
577 return 0;
578 err0:
579 return rc;
580 }
581
582 #ifdef CONFIG_PM_SLEEP
583 static int ahci_highbank_suspend(struct device *dev)
584 {
585 struct ata_host *host = dev_get_drvdata(dev);
586 struct ahci_host_priv *hpriv = host->private_data;
587 void __iomem *mmio = hpriv->mmio;
588 u32 ctl;
589 int rc;
590
591 if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
592 dev_err(dev, "firmware update required for suspend/resume\n");
593 return -EIO;
594 }
595
596 /*
597 * AHCI spec rev1.1 section 8.3.3:
598 * Software must disable interrupts prior to requesting a
599 * transition of the HBA to D3 state.
600 */
601 ctl = readl(mmio + HOST_CTL);
602 ctl &= ~HOST_IRQ_EN;
603 writel(ctl, mmio + HOST_CTL);
604 readl(mmio + HOST_CTL); /* flush */
605
606 rc = ata_host_suspend(host, PMSG_SUSPEND);
607 if (rc)
608 return rc;
609
610 return 0;
611 }
612
613 static int ahci_highbank_resume(struct device *dev)
614 {
615 struct ata_host *host = dev_get_drvdata(dev);
616 int rc;
617
618 if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
619 rc = ahci_reset_controller(host);
620 if (rc)
621 return rc;
622
623 ahci_init_controller(host);
624 }
625
626 ata_host_resume(host);
627
628 return 0;
629 }
630 #endif
631
632 static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
633 ahci_highbank_suspend, ahci_highbank_resume);
634
635 static struct platform_driver ahci_highbank_driver = {
636 .remove = ata_platform_remove_one,
637 .driver = {
638 .name = "highbank-ahci",
639 .owner = THIS_MODULE,
640 .of_match_table = ahci_of_match,
641 .pm = &ahci_highbank_pm_ops,
642 },
643 .probe = ahci_highbank_probe,
644 };
645
646 module_platform_driver(ahci_highbank_driver);
647
648 MODULE_DESCRIPTION("Calxeda Highbank AHCI SATA platform driver");
649 MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
650 MODULE_LICENSE("GPL");
651 MODULE_ALIAS("sata:highbank");