drivers/spi/spi-dw.c
/*
 * Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
19
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/highmem.h>
24 #include <linux/delay.h>
25 #include <linux/slab.h>
26 #include <linux/spi/spi.h>
27 #include <linux/gpio.h>
28
29 #include "spi-dw.h"
30
31 #ifdef CONFIG_DEBUG_FS
32 #include <linux/debugfs.h>
33 #endif
34
35 #define START_STATE ((void *)0)
36 #define RUNNING_STATE ((void *)1)
37 #define DONE_STATE ((void *)2)
38 #define ERROR_STATE ((void *)-1)
39
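/*
 * These cookies are stored in spi_message->state by
 * dw_spi_transfer_one_message(), next_transfer() and pump_transfers()
 * below to track how far a message has progressed.
 */
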
/* Slave spi_dev related */
struct chip_data {
	u16 cr0;
	u8 cs;			/* chip select pin */
	u8 n_bytes;		/* width of the current transfer: 1/2/4 bytes */
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u8 poll_mode;		/* 1 means use poll mode */

	u32 dma_width;
	u32 rx_threshold;
	u32 tx_threshold;
	u8 enable_dma;
	u8 bits_per_word;
	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
	void (*cs_control)(u32 command);
};

#ifdef CONFIG_DEBUG_FS
#define SPI_REGS_BUFSIZE	1024
static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dw_spi *dws;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	dws = file->private_data;

	buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"MRST SPI0 registers:\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}

static const struct file_operations mrst_spi_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= spi_show_regs,
	.llseek		= default_llseek,
};

static int mrst_spi_debugfs_init(struct dw_spi *dws)
{
	dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	debugfs_create_file("registers", S_IFREG | S_IRUGO,
		dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
	return 0;
}

static void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
	if (dws->debugfs)
		debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

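/*
 * The TX side is never allowed to run more than one FIFO depth ahead of
 * the RX side, otherwise the RX FIFO could overflow while TX words are
 * still being queued. A worked example (illustrative numbers only): with
 * fifo_len == 16 and 3 words already written but not yet read back
 * (rxtx_gap == 3), tx_max() below allows at most 16 - 3 = 13 new words,
 * further limited by the remaining transfer length and the free TX room.
 */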
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_left, tx_room, rxtx_gap;

	tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
	tx_room = dws->fifo_len - dw_readw(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the tx/rx mismatch: we considered using
	 * (dws->fifo_len - rxflr - txflr) as the tx limit, but that does
	 * not cover data that has already left the FIFOs and is sitting
	 * in the shift registers, so the limit is enforced from the
	 * software side instead.
	 */
	rxtx_gap =  ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
			/ dws->n_bytes;

	return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
	u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;

	return min(rx_left, (u32)dw_readw(dws, DW_SPI_RXFLR));
}

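/*
 * dw_writer()/dw_reader() implement the PIO data path: the writer stuffs
 * the TX FIFO with up to tx_max() words and the reader drains up to
 * rx_max() words from the RX FIFO. Both the poll_transfer() loop and the
 * TXEI interrupt handler below rely on this pair.
 */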
static void dw_writer(struct dw_spi *dws)
{
	u32 max = tx_max(dws);
	u16 txw = 0;

	while (max--) {
		/* Set the tx word if the transfer's original "tx" is not null */
		if (dws->tx_end - dws->len) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else
				txw = *(u16 *)(dws->tx);
		}
		dw_writew(dws, DW_SPI_DR, txw);
		dws->tx += dws->n_bytes;
	}
}

static void dw_reader(struct dw_spi *dws)
{
	u32 max = rx_max(dws);
	u16 rxw;

	while (max--) {
		rxw = dw_readw(dws, DW_SPI_DR);
		/* Store the rx word only if the transfer's original "rx" is not null */
		if (dws->rx_end - dws->len) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else
				*(u16 *)(dws->rx) = rxw;
		}
		dws->rx += dws->n_bytes;
	}
}

static void *next_transfer(struct dw_spi *dws)
{
	struct spi_message *msg = dws->cur_msg;
	struct spi_transfer *trans = dws->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		dws->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

/*
 * Note: the protocol driver is expected to have prepared DMA-capable
 * memory beforehand (is_dma_mapped set), so this function only has to
 * pick up the already mapped DMA addresses for the current transfer.
 */
static int map_dma_buffers(struct dw_spi *dws)
{
	if (!dws->cur_msg->is_dma_mapped
		|| !dws->dma_inited
		|| !dws->cur_chip->enable_dma
		|| !dws->dma_ops)
		return 0;

	if (dws->cur_transfer->tx_dma)
		dws->tx_dma = dws->cur_transfer->tx_dma;

	if (dws->cur_transfer->rx_dma)
		dws->rx_dma = dws->cur_transfer->rx_dma;

	return 1;
}

/* Caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct dw_spi *dws)
{
	struct spi_transfer *last_transfer;
	struct spi_message *msg;

	msg = dws->cur_msg;
	dws->cur_msg = NULL;
	dws->cur_transfer = NULL;
	dws->prev_chip = dws->cur_chip;
	dws->cur_chip = NULL;
	dws->dma_mapped = 0;

	last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
					transfer_list);

	if (!last_transfer->cs_change)
		spi_chip_sel(dws, msg->spi, 0);

	spi_finalize_current_message(dws->master);
}

static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	/* Stop the hw */
	spi_enable_chip(dws, 0);

	dev_err(&dws->master->dev, "%s\n", msg);
	dws->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&dws->pump_transfers);
}

void dw_spi_xfer_done(struct dw_spi *dws)
{
	/* Update the total number of bytes transferred for this message */
	dws->cur_msg->actual_length += dws->len;

	/* Move to next transfer */
	dws->cur_msg->state = next_transfer(dws);

	/* Handle end of message */
	if (dws->cur_msg->state == DONE_STATE) {
		dws->cur_msg->status = 0;
		giveback(dws);
	} else
		tasklet_schedule(&dws->pump_transfers);
}
EXPORT_SYMBOL_GPL(dw_spi_xfer_done);

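/*
 * Interrupt-mode data path: pump_transfers() unmasks TXEI, and every time
 * the TX FIFO drains down to the programmed threshold this handler refills
 * it (with TXEI temporarily masked) and drains the RX FIFO. Once the RX
 * side has consumed the whole transfer, TXEI is masked for good and
 * dw_spi_xfer_done() advances to the next transfer in the message.
 */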
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readw(dws, DW_SPI_ISR);

	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		dw_readw(dws, DW_SPI_TXOICR);
		dw_readw(dws, DW_SPI_RXOICR);
		dw_readw(dws, DW_SPI_RXUICR);
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	dw_reader(dws);
	if (dws->rx_end == dws->rx) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_spi_xfer_done(dws);
		return IRQ_HANDLED;
	}
	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_writer(dws);
		/* Re-enable the TX irq; it is masked for good once RX finishes */
		spi_umask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct dw_spi *dws = dev_id;
	u16 irq_status = dw_readw(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

	if (!dws->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

/* Must be called inside pump_transfers() */
static void poll_transfer(struct dw_spi *dws)
{
	do {
		dw_writer(dws);
		dw_reader(dws);
		cpu_relax();
	} while (dws->rx_end > dws->rx);

	dw_spi_xfer_done(dws);
}

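/*
 * pump_transfers() is the tasklet that drives a message through its
 * transfers: dw_spi_transfer_one_message() schedules it with the message
 * in START_STATE, and it is rescheduled from dw_spi_xfer_done() (or
 * int_error_stop()) until the message reaches DONE_STATE or ERROR_STATE,
 * at which point giveback() finalizes the message.
 */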
static void pump_transfers(unsigned long data)
{
	struct dw_spi *dws = (struct dw_spi *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct spi_device *spi = NULL;
	struct chip_data *chip = NULL;
	u8 bits = 0;
	u8 imask = 0;
	u8 cs_change = 0;
	u16 txint_level = 0;
	u16 clk_div = 0;
	u32 speed = 0;
	u32 cr0 = 0;

	/* Get current state information */
	message = dws->cur_msg;
	transfer = dws->cur_transfer;
	chip = dws->cur_chip;
	spi = message->spi;

	if (unlikely(!chip->clk_div))
		chip->clk_div = dws->max_freq / chip->speed_hz;

	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		goto early_exit;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		goto early_exit;
	}

	/* Delay if requested at the end of the previous transfer */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	}

	dws->n_bytes = chip->n_bytes;
	dws->dma_width = chip->dma_width;
	dws->cs_control = chip->cs_control;

	dws->rx_dma = transfer->rx_dma;
	dws->tx_dma = transfer->tx_dma;
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->len = dws->cur_transfer->len;
	if (chip != dws->prev_chip)
		cs_change = 1;

	cr0 = chip->cr0;

	/* Handle per transfer options for bpw and speed */
	if (transfer->speed_hz) {
		speed = chip->speed_hz;

		if (transfer->speed_hz != speed) {
			speed = transfer->speed_hz;

			/* clk_div doesn't support odd numbers; round an odd divider up to even */
			clk_div = dws->max_freq / speed;
			clk_div = (clk_div + 1) & 0xfffe;
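			/*
			 * Worked example (illustrative numbers only): with
			 * max_freq == 100 MHz and a requested 33 MHz,
			 * 100 / 33 = 3 is rounded up to the even divider 4,
			 * giving an actual SCLK of 25 MHz.
			 */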

			chip->speed_hz = speed;
			chip->clk_div = clk_div;
		}
	}
	if (transfer->bits_per_word) {
		bits = transfer->bits_per_word;
		dws->n_bytes = dws->dma_width = bits >> 3;
		cr0 = (bits - 1)
			| (chip->type << SPI_FRF_OFFSET)
			| (spi->mode << SPI_MODE_OFFSET)
			| (chip->tmode << SPI_TMOD_OFFSET);
	}
	message->state = RUNNING_STATE;

	/*
	 * Adjust transfer mode if necessary. Requires platform dependent
	 * chipselect mechanism.
	 */
	if (dws->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = SPI_TMOD_TR;
		else if (dws->rx)
			chip->tmode = SPI_TMOD_RO;
		else
			chip->tmode = SPI_TMOD_TO;

		cr0 &= ~SPI_TMOD_MASK;
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}

	/* Check if current transfer is a DMA transaction */
	dws->dma_mapped = map_dma_buffers(dws);

	/*
	 * Interrupt mode:
	 * we only need to set the TXEI IRQ, as TX and RX always happen
	 * synchronously.
	 */
	if (!dws->dma_mapped && !chip->poll_mode) {
		int templen = dws->len / dws->n_bytes;

		txint_level = dws->fifo_len / 2;
		txint_level = (templen > txint_level) ? txint_level : templen;

		imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI;
		dws->transfer_handler = interrupt_transfer;
	}

	/*
	 * Reprogram registers only if
	 *	1. chip select changes
	 *	2. clk_div is changed
	 *	3. control value changes
	 */
	if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) {
		spi_enable_chip(dws, 0);

		if (dw_readw(dws, DW_SPI_CTRL0) != cr0)
			dw_writew(dws, DW_SPI_CTRL0, cr0);

		spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
		spi_chip_sel(dws, spi, 1);

		/* Set the interrupt mask; for poll mode just disable all interrupts */
		spi_mask_intr(dws, 0xff);
		if (imask)
			spi_umask_intr(dws, imask);
		if (txint_level)
			dw_writew(dws, DW_SPI_TXFLTR, txint_level);

		spi_enable_chip(dws, 1);
		if (cs_change)
			dws->prev_chip = chip;
	}

	if (dws->dma_mapped)
		dws->dma_ops->dma_transfer(dws, cs_change);

	if (chip->poll_mode)
		poll_transfer(dws);

	return;

early_exit:
	giveback(dws);
	return;
}

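/*
 * Called from the SPI core's message queue worker: just record the new
 * message, point cur_transfer at its first transfer, and kick the
 * pump_transfers tasklet to do the actual work.
 */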
static int dw_spi_transfer_one_message(struct spi_master *master,
		struct spi_message *msg)
{
	struct dw_spi *dws = spi_master_get_devdata(master);

	dws->cur_msg = msg;
	/* Initial message state */
	dws->cur_msg->state = START_STATE;
	dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
					struct spi_transfer,
					transfer_list);
	dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);

	/* Launch transfers */
	tasklet_schedule(&dws->pump_transfers);

	return 0;
}

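/*
 * A minimal sketch (hypothetical board/protocol code, not part of this
 * driver) of how per-device options reach dw_spi_setup(): the caller may
 * hang a struct dw_spi_chip (declared in spi-dw.h) off the spi_device
 * before setup runs, e.g.
 *
 *	static struct dw_spi_chip my_chip_info = {
 *		.poll_mode  = 1,		// PIO polling instead of IRQs
 *		.type       = SSI_MOTO_SPI,	// Motorola SPI frame format
 *		.enable_dma = 0,
 *	};
 *	spi->controller_data = &my_chip_info;
 *
 * dw_spi_setup() then copies these fields into its private chip_data.
 */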
/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	int ret;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
				GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}

	/*
	 * Protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it
	 */
	chip_info = spi->controller_data;

	/* chip_info doesn't always exist */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;

		chip->poll_mode = chip_info->poll_mode;
		chip->type = chip_info->type;

		chip->rx_threshold = 0;
		chip->tx_threshold = 0;

		chip->enable_dma = chip_info->enable_dma;
	}

	if (spi->bits_per_word == 8) {
		chip->n_bytes = 1;
		chip->dma_width = 1;
	} else if (spi->bits_per_word == 16) {
		chip->n_bytes = 2;
		chip->dma_width = 2;
	}
	chip->bits_per_word = spi->bits_per_word;

	if (!spi->max_speed_hz) {
		dev_err(&spi->dev, "No max speed HZ parameter\n");
		return -EINVAL;
	}
	chip->speed_hz = spi->max_speed_hz;

	chip->tmode = 0; /* Tx & Rx */
	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	chip->cr0 = (chip->bits_per_word - 1)
			| (chip->type << SPI_FRF_OFFSET)
			| (spi->mode << SPI_MODE_OFFSET)
			| (chip->tmode << SPI_TMOD_OFFSET);

	if (gpio_is_valid(spi->cs_gpio)) {
		ret = gpio_direction_output(spi->cs_gpio,
				!(spi->mode & SPI_CS_HIGH));
		if (ret)
			return ret;
	}

	return 0;
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct dw_spi *dws)
{
	spi_enable_chip(dws, 0);
	spi_mask_intr(dws, 0xff);
	spi_enable_chip(dws, 1);

	/*
	 * Try to detect the FIFO depth if not set by the interface driver;
	 * per the HW spec the depth can be anywhere from 2 to 256.
	 */
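	/*
	 * The loop below assumes the TX FIFO threshold register only holds
	 * values below the FIFO depth, so the first value that does not
	 * read back unmodified reveals the depth.
	 */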
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 2; fifo <= 257; fifo++) {
			dw_writew(dws, DW_SPI_TXFLTR, fifo);
			if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
				break;
		}

		dws->fifo_len = (fifo == 257) ? 0 : fifo;
		dw_writew(dws, DW_SPI_TXFLTR, 0);
	}
}

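/*
 * dw_spi_add_host() expects the interface driver (e.g. spi-dw-mmio or
 * spi-dw-pci) to have filled in the dw_spi fields consumed here: regs,
 * paddr, irq, bus_num, num_cs and max_freq, plus optionally fifo_len
 * and dma_ops.
 */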
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_master *master;
	int ret;

	BUG_ON(dws == NULL);

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	dws->master = master;
	dws->type = SSI_MOTO_SPI;
	dws->prev_chip = NULL;
	dws->dma_inited = 0;
	dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
	snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
			dws->bus_num);

	ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED,
			dws->name, dws);
	if (ret < 0) {
		dev_err(&master->dev, "can not get IRQ\n");
		goto err_free_master;
	}

	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->transfer_one_message = dw_spi_transfer_one_message;
	master->max_speed_hz = dws->max_freq;

	/* Basic HW init */
	spi_hw_init(dws);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dws);
		if (ret) {
			dev_warn(&master->dev, "DMA init failed\n");
			dws->dma_inited = 0;
		}
	}

	tasklet_init(&dws->pump_transfers, pump_transfers, (unsigned long)dws);

	spi_master_set_devdata(master, dws);
	ret = devm_spi_register_master(dev, master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_dma_exit;
	}

	mrst_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
err_free_master:
	spi_master_put(master);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);

void dw_spi_remove_host(struct dw_spi *dws)
{
	if (!dws)
		return;
	mrst_spi_debugfs_remove(dws);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	/* Disable clk */
	spi_set_clk(dws, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);

int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret = 0;

	ret = spi_master_suspend(dws->master);
	if (ret)
		return ret;
	spi_enable_chip(dws, 0);
	spi_set_clk(dws, 0);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);

int dw_spi_resume_host(struct dw_spi *dws)
{
	int ret;

	spi_hw_init(dws);
	ret = spi_master_resume(dws->master);
	if (ret)
		dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");