1 /*
2 * BCM2835 SD host driver.
3 *
4 * Author: Phil Elwell <phil@raspberrypi.org>
5 * Copyright (C) 2015-2016 Raspberry Pi (Trading) Ltd.
6 *
7 * Based on
8 * mmc-bcm2835.c by Gellert Weisz
9 * which is, in turn, based on
10 * sdhci-bcm2708.c by Broadcom
11 * sdhci-bcm2835.c by Stephen Warren and Oleksandr Tymoshenko
12 * sdhci.c and sdhci-pci.c by Pierre Ossman
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms and conditions of the GNU General Public License,
16 * version 2, as published by the Free Software Foundation.
17 *
18 * This program is distributed in the hope it will be useful, but WITHOUT
19 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
20 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21 * more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program. If not, see <http://www.gnu.org/licenses/>.
25 */
26
27 #define FIFO_READ_THRESHOLD 4
28 #define FIFO_WRITE_THRESHOLD 4
29 #define ALLOW_CMD23_READ 1
30 #define ALLOW_CMD23_WRITE 0
31 #define ENABLE_LOG 1
32 #define SDDATA_FIFO_PIO_BURST 8
33 #define CMD_DALLY_US 1
34
35 #include <linux/delay.h>
36 #include <linux/module.h>
37 #include <linux/io.h>
38 #include <linux/mmc/mmc.h>
39 #include <linux/mmc/host.h>
40 #include <linux/mmc/sd.h>
41 #include <linux/mmc/sdio.h>
42 #include <linux/scatterlist.h>
43 #include <linux/of_address.h>
44 #include <linux/of_irq.h>
45 #include <linux/clk.h>
46 #include <linux/platform_device.h>
47 #include <linux/err.h>
48 #include <linux/blkdev.h>
49 #include <linux/dmaengine.h>
50 #include <linux/dma-mapping.h>
51 #include <linux/of_dma.h>
52 #include <linux/time.h>
53 #include <linux/workqueue.h>
54 #include <soc/bcm2835/raspberrypi-firmware.h>
55
56 #define DRIVER_NAME "sdhost-bcm2835"
57
58 #define SDCMD 0x00 /* Command to SD card - 16 R/W */
59 #define SDARG 0x04 /* Argument to SD card - 32 R/W */
60 #define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */
61 #define SDCDIV 0x0c /* Start value for clock divider - 11 R/W */
62 #define SDRSP0 0x10 /* SD card response (31:0) - 32 R */
63 #define SDRSP1 0x14 /* SD card response (63:32) - 32 R */
64 #define SDRSP2 0x18 /* SD card response (95:64) - 32 R */
65 #define SDRSP3 0x1c /* SD card response (127:96) - 32 R */
66 #define SDHSTS 0x20 /* SD host status - 11 R */
67 #define SDVDD 0x30 /* SD card power control - 1 R/W */
68 #define SDEDM 0x34 /* Emergency Debug Mode - 13 R/W */
69 #define SDHCFG 0x38 /* Host configuration - 2 R/W */
70 #define SDHBCT 0x3c /* Host byte count (debug) - 32 R/W */
71 #define SDDATA 0x40 /* Data to/from SD card - 32 R/W */
72 #define SDHBLC 0x50 /* Host block count (SDIO/SDHC) - 9 R/W */
73
74 #define SDCMD_NEW_FLAG 0x8000
75 #define SDCMD_FAIL_FLAG 0x4000
76 #define SDCMD_BUSYWAIT 0x800
77 #define SDCMD_NO_RESPONSE 0x400
78 #define SDCMD_LONG_RESPONSE 0x200
79 #define SDCMD_WRITE_CMD 0x80
80 #define SDCMD_READ_CMD 0x40
81 #define SDCMD_CMD_MASK 0x3f
82
83 #define SDCDIV_MAX_CDIV 0x7ff
84
85 #define SDHSTS_BUSY_IRPT 0x400
86 #define SDHSTS_BLOCK_IRPT 0x200
87 #define SDHSTS_SDIO_IRPT 0x100
88 #define SDHSTS_REW_TIME_OUT 0x80
89 #define SDHSTS_CMD_TIME_OUT 0x40
90 #define SDHSTS_CRC16_ERROR 0x20
91 #define SDHSTS_CRC7_ERROR 0x10
92 #define SDHSTS_FIFO_ERROR 0x08
93 /* Reserved */
94 /* Reserved */
95 #define SDHSTS_DATA_FLAG 0x01
96
97 #define SDHSTS_TRANSFER_ERROR_MASK (SDHSTS_CRC7_ERROR|SDHSTS_CRC16_ERROR|SDHSTS_REW_TIME_OUT|SDHSTS_FIFO_ERROR)
98 #define SDHSTS_ERROR_MASK (SDHSTS_CMD_TIME_OUT|SDHSTS_TRANSFER_ERROR_MASK)
99
100 #define SDHCFG_BUSY_IRPT_EN (1<<10)
101 #define SDHCFG_BLOCK_IRPT_EN (1<<8)
102 #define SDHCFG_SDIO_IRPT_EN (1<<5)
103 #define SDHCFG_DATA_IRPT_EN (1<<4)
104 #define SDHCFG_SLOW_CARD (1<<3)
105 #define SDHCFG_WIDE_EXT_BUS (1<<2)
106 #define SDHCFG_WIDE_INT_BUS (1<<1)
107 #define SDHCFG_REL_CMD_LINE (1<<0)
108
109 #define SDEDM_FORCE_DATA_MODE (1<<19)
110 #define SDEDM_CLOCK_PULSE (1<<20)
111 #define SDEDM_BYPASS (1<<21)
112
113 #define SDEDM_WRITE_THRESHOLD_SHIFT 9
114 #define SDEDM_READ_THRESHOLD_SHIFT 14
115 #define SDEDM_THRESHOLD_MASK 0x1f
116
117 #define SDEDM_FSM_MASK 0xf
118 #define SDEDM_FSM_IDENTMODE 0x0
119 #define SDEDM_FSM_DATAMODE 0x1
120 #define SDEDM_FSM_READDATA 0x2
121 #define SDEDM_FSM_WRITEDATA 0x3
122 #define SDEDM_FSM_READWAIT 0x4
123 #define SDEDM_FSM_READCRC 0x5
124 #define SDEDM_FSM_WRITECRC 0x6
125 #define SDEDM_FSM_WRITEWAIT1 0x7
126 #define SDEDM_FSM_POWERDOWN 0x8
127 #define SDEDM_FSM_POWERUP 0x9
128 #define SDEDM_FSM_WRITESTART1 0xa
129 #define SDEDM_FSM_WRITESTART2 0xb
130 #define SDEDM_FSM_GENPULSES 0xc
131 #define SDEDM_FSM_WRITEWAIT2 0xd
132 #define SDEDM_FSM_STARTPOWDOWN 0xf
133
134 #define SDDATA_FIFO_WORDS 16
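/*
 * Note (inferred from the accessors below, not from a datasheet): the PIO
 * and drain paths read the current FIFO occupancy as ((edm >> 4) & 0x1f),
 * i.e. a 5-bit word count sitting just below the write-threshold field.
 * An illustrative read of the level would be:
 *
 *	u32 edm   = bcm2835_sdhost_read(host, SDEDM);
 *	u32 level = (edm >> 4) & SDEDM_THRESHOLD_MASK;	(0..SDDATA_FIFO_WORDS)
 */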
135
136 #define USE_CMD23_FLAGS ((ALLOW_CMD23_READ * MMC_DATA_READ) | \
137 (ALLOW_CMD23_WRITE * MMC_DATA_WRITE))
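/*
 * USE_CMD23_FLAGS works because MMC_DATA_READ and MMC_DATA_WRITE are
 * single-bit flags: multiplying by ALLOW_CMD23_* (0 or 1) keeps or drops
 * each bit. With the defaults above it expands to just MMC_DATA_READ, so
 * only read transfers are eligible for CMD23, as in the request path:
 *
 *	host->use_sbc = !!mrq->sbc &&
 *		(mrq->data->flags & USE_CMD23_FLAGS);
 */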
138
139 #define MHZ 1000000
140
141
142 struct bcm2835_host {
143 spinlock_t lock;
144
145 void __iomem *ioaddr;
146 u32 bus_addr;
147
148 struct mmc_host *mmc;
149
150 u32 pio_timeout; /* In jiffies */
151
152 int clock; /* Current clock speed */
153
154 bool slow_card; /* Force 11-bit divisor */
155
156 unsigned int max_clk; /* Max possible freq */
157
158 struct tasklet_struct finish_tasklet; /* Tasklet structures */
159
struct work_struct cmd_wait_wq; /* Work for deferred command-completion polling */
161
162 struct timer_list timer; /* Timer for timeouts */
163
164 struct sg_mapping_iter sg_miter; /* SG state for PIO */
165 unsigned int blocks; /* remaining PIO blocks */
166
167 int irq; /* Device IRQ */
168
169 u32 cmd_quick_poll_retries;
170 u32 ns_per_fifo_word;
171
172 /* cached registers */
173 u32 hcfg;
174 u32 cdiv;
175
176 struct mmc_request *mrq; /* Current request */
177 struct mmc_command *cmd; /* Current command */
178 struct mmc_data *data; /* Current data request */
179 unsigned int data_complete:1; /* Data finished before cmd */
180
181 unsigned int flush_fifo:1; /* Drain the fifo when finishing */
182
183 unsigned int use_busy:1; /* Wait for busy interrupt */
184
185 unsigned int use_sbc:1; /* Send CMD23 */
186
187 unsigned int debug:1; /* Enable debug output */
188 unsigned int firmware_sets_cdiv:1; /* Let the firmware manage the clock */
unsigned int reset_clock:1; /* Reset the clock for the next request */
190
191 /*DMA part*/
192 struct dma_chan *dma_chan_rxtx; /* DMA channel for reads and writes */
193 struct dma_chan *dma_chan; /* Channel in use */
194 struct dma_slave_config dma_cfg_rx;
195 struct dma_slave_config dma_cfg_tx;
196 struct dma_async_tx_descriptor *dma_desc;
197 u32 dma_dir;
198 u32 drain_words;
199 struct page *drain_page;
200 u32 drain_offset;
201
202 bool allow_dma;
203 bool use_dma;
204 /*end of DMA part*/
205
206 int max_delay; /* maximum length of time spent waiting */
207 struct timeval stop_time; /* when the last stop was issued */
208 u32 delay_after_stop; /* minimum time between stop and subsequent data transfer */
209 u32 delay_after_this_stop; /* minimum time between this stop and subsequent data transfer */
210 u32 user_overclock_50; /* User's preferred frequency to use when 50MHz is requested (in MHz) */
211 u32 overclock_50; /* frequency to use when 50MHz is requested (in MHz) */
212 u32 overclock; /* Current frequency if overclocked, else zero */
213 u32 pio_limit; /* Maximum block count for PIO (0 = always DMA) */
214
215 u32 sectors; /* Cached card size in sectors */
216 };
217
218 #if ENABLE_LOG
219
220 struct log_entry_struct {
221 char event[4];
222 u32 timestamp;
223 u32 param1;
224 u32 param2;
225 };
226
227 typedef struct log_entry_struct LOG_ENTRY_T;
228
229 LOG_ENTRY_T *sdhost_log_buf;
230 dma_addr_t sdhost_log_addr;
231 static u32 sdhost_log_idx;
232 static spinlock_t log_lock;
233 static void __iomem *timer_base;
234
235 #define LOG_ENTRIES (256*1)
236 #define LOG_SIZE (sizeof(LOG_ENTRY_T)*LOG_ENTRIES)
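/*
 * Sizing: each entry is a 4-byte tag plus three u32s (16 bytes, assuming no
 * padding), so 256 entries make LOG_SIZE = 256 * 16 = 4096 bytes - a single
 * page of coherent memory used as a ring buffer.
 */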
237
238 static void log_init(struct device *dev, u32 bus_to_phys)
239 {
240 spin_lock_init(&log_lock);
241 sdhost_log_buf = dma_zalloc_coherent(dev, LOG_SIZE, &sdhost_log_addr,
242 GFP_KERNEL);
243 if (sdhost_log_buf) {
pr_info("sdhost: log_buf @ %p (%pad)\n",
	sdhost_log_buf, &sdhost_log_addr);
246 timer_base = ioremap_nocache(bus_to_phys + 0x7e003000, SZ_4K);
247 if (!timer_base)
248 pr_err("sdhost: failed to remap timer\n");
249 }
250 else
251 pr_err("sdhost: failed to allocate log buf\n");
252 }
253
254 static void log_event_impl(const char *event, u32 param1, u32 param2)
255 {
256 if (sdhost_log_buf) {
257 LOG_ENTRY_T *entry;
258 unsigned long flags;
259
260 spin_lock_irqsave(&log_lock, flags);
261
262 entry = sdhost_log_buf + sdhost_log_idx;
263 memcpy(entry->event, event, 4);
264 entry->timestamp = (readl(timer_base + 4) & 0x3fffffff) +
265 (smp_processor_id()<<30);
266 entry->param1 = param1;
267 entry->param2 = param2;
268 sdhost_log_idx = (sdhost_log_idx + 1) % LOG_ENTRIES;
269
270 spin_unlock_irqrestore(&log_lock, flags);
271 }
272 }
273
274 static void log_dump(void)
275 {
276 if (sdhost_log_buf) {
277 LOG_ENTRY_T *entry;
278 unsigned long flags;
279 int idx;
280
281 spin_lock_irqsave(&log_lock, flags);
282
283 idx = sdhost_log_idx;
284 do {
285 entry = sdhost_log_buf + idx;
286 if (entry->event[0] != '\0')
287 pr_info("[%08x] %.4s %x %x\n",
288 entry->timestamp,
289 entry->event,
290 entry->param1,
291 entry->param2);
292 idx = (idx + 1) % LOG_ENTRIES;
293 } while (idx != sdhost_log_idx);
294
295 spin_unlock_irqrestore(&log_lock, flags);
296 }
297 }
298
299 #define log_event(event, param1, param2) log_event_impl(event, param1, param2)
300
301 #else
302
#define log_init(dev, bus_to_phys) (void)0
304 #define log_event(event, param1, param2) (void)0
305 #define log_dump() (void)0
306
307 #endif
308
309 static inline void bcm2835_sdhost_write(struct bcm2835_host *host, u32 val, int reg)
310 {
311 writel(val, host->ioaddr + reg);
312 }
313
314 static inline u32 bcm2835_sdhost_read(struct bcm2835_host *host, int reg)
315 {
316 return readl(host->ioaddr + reg);
317 }
318
319 static inline u32 bcm2835_sdhost_read_relaxed(struct bcm2835_host *host, int reg)
320 {
321 return readl_relaxed(host->ioaddr + reg);
322 }
323
324 static void bcm2835_sdhost_dumpcmd(struct bcm2835_host *host,
325 struct mmc_command *cmd,
326 const char *label)
327 {
328 if (cmd)
329 pr_info("%s:%c%s op %d arg 0x%x flags 0x%x - resp %08x %08x %08x %08x, err %d\n",
330 mmc_hostname(host->mmc),
331 (cmd == host->cmd) ? '>' : ' ',
332 label, cmd->opcode, cmd->arg, cmd->flags,
333 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3],
334 cmd->error);
335 }
336
337 static void bcm2835_sdhost_dumpregs(struct bcm2835_host *host)
338 {
339 if (host->mrq)
340 {
341 bcm2835_sdhost_dumpcmd(host, host->mrq->sbc, "sbc");
342 bcm2835_sdhost_dumpcmd(host, host->mrq->cmd, "cmd");
343 if (host->mrq->data)
344 pr_info("%s: data blocks %x blksz %x - err %d\n",
345 mmc_hostname(host->mmc),
346 host->mrq->data->blocks,
347 host->mrq->data->blksz,
348 host->mrq->data->error);
349 bcm2835_sdhost_dumpcmd(host, host->mrq->stop, "stop");
350 }
351
352 pr_info("%s: =========== REGISTER DUMP ===========\n",
353 mmc_hostname(host->mmc));
354
355 pr_info("%s: SDCMD 0x%08x\n",
356 mmc_hostname(host->mmc),
357 bcm2835_sdhost_read(host, SDCMD));
358 pr_info("%s: SDARG 0x%08x\n",
359 mmc_hostname(host->mmc),
360 bcm2835_sdhost_read(host, SDARG));
361 pr_info("%s: SDTOUT 0x%08x\n",
362 mmc_hostname(host->mmc),
363 bcm2835_sdhost_read(host, SDTOUT));
364 pr_info("%s: SDCDIV 0x%08x\n",
365 mmc_hostname(host->mmc),
366 bcm2835_sdhost_read(host, SDCDIV));
367 pr_info("%s: SDRSP0 0x%08x\n",
368 mmc_hostname(host->mmc),
369 bcm2835_sdhost_read(host, SDRSP0));
370 pr_info("%s: SDRSP1 0x%08x\n",
371 mmc_hostname(host->mmc),
372 bcm2835_sdhost_read(host, SDRSP1));
373 pr_info("%s: SDRSP2 0x%08x\n",
374 mmc_hostname(host->mmc),
375 bcm2835_sdhost_read(host, SDRSP2));
376 pr_info("%s: SDRSP3 0x%08x\n",
377 mmc_hostname(host->mmc),
378 bcm2835_sdhost_read(host, SDRSP3));
379 pr_info("%s: SDHSTS 0x%08x\n",
380 mmc_hostname(host->mmc),
381 bcm2835_sdhost_read(host, SDHSTS));
382 pr_info("%s: SDVDD 0x%08x\n",
383 mmc_hostname(host->mmc),
384 bcm2835_sdhost_read(host, SDVDD));
385 pr_info("%s: SDEDM 0x%08x\n",
386 mmc_hostname(host->mmc),
387 bcm2835_sdhost_read(host, SDEDM));
388 pr_info("%s: SDHCFG 0x%08x\n",
389 mmc_hostname(host->mmc),
390 bcm2835_sdhost_read(host, SDHCFG));
391 pr_info("%s: SDHBCT 0x%08x\n",
392 mmc_hostname(host->mmc),
393 bcm2835_sdhost_read(host, SDHBCT));
394 pr_info("%s: SDHBLC 0x%08x\n",
395 mmc_hostname(host->mmc),
396 bcm2835_sdhost_read(host, SDHBLC));
397
398 pr_info("%s: ===========================================\n",
399 mmc_hostname(host->mmc));
400 }
401
402 static void bcm2835_sdhost_set_power(struct bcm2835_host *host, bool on)
403 {
404 bcm2835_sdhost_write(host, on ? 1 : 0, SDVDD);
405 }
406
407 static void bcm2835_sdhost_reset_internal(struct bcm2835_host *host)
408 {
409 u32 temp;
410
411 if (host->debug)
412 pr_info("%s: reset\n", mmc_hostname(host->mmc));
413
414 bcm2835_sdhost_set_power(host, false);
415
416 bcm2835_sdhost_write(host, 0, SDCMD);
417 bcm2835_sdhost_write(host, 0, SDARG);
418 bcm2835_sdhost_write(host, 0xf00000, SDTOUT);
419 bcm2835_sdhost_write(host, 0, SDCDIV);
420 bcm2835_sdhost_write(host, 0x7f8, SDHSTS); /* Write 1s to clear */
421 bcm2835_sdhost_write(host, 0, SDHCFG);
422 bcm2835_sdhost_write(host, 0, SDHBCT);
423 bcm2835_sdhost_write(host, 0, SDHBLC);
424
425 /* Limit fifo usage due to silicon bug */
426 temp = bcm2835_sdhost_read(host, SDEDM);
427 temp &= ~((SDEDM_THRESHOLD_MASK<<SDEDM_READ_THRESHOLD_SHIFT) |
428 (SDEDM_THRESHOLD_MASK<<SDEDM_WRITE_THRESHOLD_SHIFT));
429 temp |= (FIFO_READ_THRESHOLD << SDEDM_READ_THRESHOLD_SHIFT) |
430 (FIFO_WRITE_THRESHOLD << SDEDM_WRITE_THRESHOLD_SHIFT);
431 bcm2835_sdhost_write(host, temp, SDEDM);
432 mdelay(10);
433 bcm2835_sdhost_set_power(host, true);
434 mdelay(10);
435 host->clock = 0;
436 host->sectors = 0;
437 bcm2835_sdhost_write(host, host->hcfg, SDHCFG);
438 bcm2835_sdhost_write(host, SDCDIV_MAX_CDIV, SDCDIV);
439 mmiowb();
440 }
441
442 static void bcm2835_sdhost_reset(struct mmc_host *mmc)
443 {
444 struct bcm2835_host *host = mmc_priv(mmc);
445 unsigned long flags;
446 spin_lock_irqsave(&host->lock, flags);
447 log_event("RST<", 0, 0);
448
449 bcm2835_sdhost_reset_internal(host);
450
451 spin_unlock_irqrestore(&host->lock, flags);
452 }
453
454 static void bcm2835_sdhost_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
455
456 static void bcm2835_sdhost_init(struct bcm2835_host *host, int soft)
457 {
458 pr_debug("bcm2835_sdhost_init(%d)\n", soft);
459
460 /* Set interrupt enables */
461 host->hcfg = SDHCFG_BUSY_IRPT_EN;
462
463 bcm2835_sdhost_reset_internal(host);
464
465 if (soft) {
466 /* force clock reconfiguration */
467 host->clock = 0;
468 bcm2835_sdhost_set_ios(host->mmc, &host->mmc->ios);
469 }
470 }
471
472 static void bcm2835_sdhost_wait_transfer_complete(struct bcm2835_host *host)
473 {
474 int timediff;
475 u32 alternate_idle;
476 u32 edm;
477
478 alternate_idle = (host->mrq->data->flags & MMC_DATA_READ) ?
479 SDEDM_FSM_READWAIT : SDEDM_FSM_WRITESTART1;
480
481 edm = bcm2835_sdhost_read(host, SDEDM);
482
483 log_event("WTC<", edm, 0);
484
485 timediff = 0;
486
487 while (1) {
488 u32 fsm = edm & SDEDM_FSM_MASK;
489 if ((fsm == SDEDM_FSM_IDENTMODE) ||
490 (fsm == SDEDM_FSM_DATAMODE))
491 break;
492 if (fsm == alternate_idle) {
493 bcm2835_sdhost_write(host,
494 edm | SDEDM_FORCE_DATA_MODE,
495 SDEDM);
496 break;
497 }
498
499 timediff++;
500 if (timediff == 100000) {
501 pr_err("%s: wait_transfer_complete - still waiting after %d retries\n",
502 mmc_hostname(host->mmc),
503 timediff);
504 log_dump();
505 bcm2835_sdhost_dumpregs(host);
506 host->mrq->data->error = -ETIMEDOUT;
507 log_event("WTC!", edm, 0);
508 return;
509 }
510 cpu_relax();
511 edm = bcm2835_sdhost_read(host, SDEDM);
512 }
513 log_event("WTC>", edm, 0);
514 }
515
516 static void bcm2835_sdhost_finish_data(struct bcm2835_host *host);
517
518 static void bcm2835_sdhost_dma_complete(void *param)
519 {
520 struct bcm2835_host *host = param;
521 struct mmc_data *data = host->data;
522 unsigned long flags;
523
524 spin_lock_irqsave(&host->lock, flags);
525 log_event("DMA<", (u32)host->data, bcm2835_sdhost_read(host, SDHSTS));
526 log_event("DMA ", bcm2835_sdhost_read(host, SDCMD),
527 bcm2835_sdhost_read(host, SDEDM));
528
529 if (host->dma_chan) {
530 dma_unmap_sg(host->dma_chan->device->dev,
531 data->sg, data->sg_len,
532 host->dma_dir);
533
534 host->dma_chan = NULL;
535 }
536
537 if (host->drain_words) {
538 void *page;
539 u32 *buf;
540
541 page = kmap_atomic(host->drain_page);
542 buf = page + host->drain_offset;
543
544 while (host->drain_words) {
545 u32 edm = bcm2835_sdhost_read(host, SDEDM);
546 if ((edm >> 4) & 0x1f)
547 *(buf++) = bcm2835_sdhost_read(host,
548 SDDATA);
549 host->drain_words--;
550 }
551
552 kunmap_atomic(page);
553 }
554
555 bcm2835_sdhost_finish_data(host);
556
557 log_event("DMA>", (u32)host->data, 0);
558 spin_unlock_irqrestore(&host->lock, flags);
559 }
560
561 static void bcm2835_sdhost_read_block_pio(struct bcm2835_host *host)
562 {
563 unsigned long flags;
564 size_t blksize, len;
565 u32 *buf;
566 unsigned long wait_max;
567
568 blksize = host->data->blksz;
569
wait_max = jiffies + host->pio_timeout; /* pio_timeout is already in jiffies */
571
572 local_irq_save(flags);
573
574 while (blksize) {
575 int copy_words;
576 u32 hsts = 0;
577
578 if (!sg_miter_next(&host->sg_miter)) {
579 host->data->error = -EINVAL;
580 break;
581 }
582
583 len = min(host->sg_miter.length, blksize);
584 if (len % 4) {
585 host->data->error = -EINVAL;
586 break;
587 }
588
589 blksize -= len;
590 host->sg_miter.consumed = len;
591
592 buf = (u32 *)host->sg_miter.addr;
593
594 copy_words = len/4;
595
596 while (copy_words) {
597 int burst_words, words;
598 u32 edm;
599
600 burst_words = SDDATA_FIFO_PIO_BURST;
601 if (burst_words > copy_words)
602 burst_words = copy_words;
603 edm = bcm2835_sdhost_read(host, SDEDM);
604 words = ((edm >> 4) & 0x1f);
605
606 if (words < burst_words) {
607 int fsm_state = (edm & SDEDM_FSM_MASK);
608 if ((fsm_state != SDEDM_FSM_READDATA) &&
609 (fsm_state != SDEDM_FSM_READWAIT) &&
610 (fsm_state != SDEDM_FSM_READCRC)) {
611 hsts = bcm2835_sdhost_read(host,
612 SDHSTS);
613 pr_info("%s: fsm %x, hsts %x\n",
614 mmc_hostname(host->mmc),
615 fsm_state, hsts);
616 if (hsts & SDHSTS_ERROR_MASK)
617 break;
618 }
619
620 if (time_after(jiffies, wait_max)) {
621 pr_err("%s: PIO read timeout - EDM %x\n",
622 mmc_hostname(host->mmc),
623 edm);
624 hsts = SDHSTS_REW_TIME_OUT;
625 break;
626 }
627 ndelay((burst_words - words) *
628 host->ns_per_fifo_word);
629 continue;
630 } else if (words > copy_words) {
631 words = copy_words;
632 }
633
634 copy_words -= words;
635
636 while (words) {
637 *(buf++) = bcm2835_sdhost_read(host, SDDATA);
638 words--;
639 }
640 }
641
642 if (hsts & SDHSTS_ERROR_MASK)
643 break;
644 }
645
646 sg_miter_stop(&host->sg_miter);
647
648 local_irq_restore(flags);
649 }
650
651 static void bcm2835_sdhost_write_block_pio(struct bcm2835_host *host)
652 {
653 unsigned long flags;
654 size_t blksize, len;
655 u32 *buf;
656 unsigned long wait_max;
657
658 blksize = host->data->blksz;
659
wait_max = jiffies + host->pio_timeout; /* pio_timeout is already in jiffies */
661
662 local_irq_save(flags);
663
664 while (blksize) {
665 int copy_words;
666 u32 hsts = 0;
667
668 if (!sg_miter_next(&host->sg_miter)) {
669 host->data->error = -EINVAL;
670 break;
671 }
672
673 len = min(host->sg_miter.length, blksize);
674 if (len % 4) {
675 host->data->error = -EINVAL;
676 break;
677 }
678
679 blksize -= len;
680 host->sg_miter.consumed = len;
681
682 buf = (u32 *)host->sg_miter.addr;
683
684 copy_words = len/4;
685
686 while (copy_words) {
687 int burst_words, words;
688 u32 edm;
689
690 burst_words = SDDATA_FIFO_PIO_BURST;
691 if (burst_words > copy_words)
692 burst_words = copy_words;
693 edm = bcm2835_sdhost_read(host, SDEDM);
694 words = SDDATA_FIFO_WORDS - ((edm >> 4) & 0x1f);
695
696 if (words < burst_words) {
697 int fsm_state = (edm & SDEDM_FSM_MASK);
698 if ((fsm_state != SDEDM_FSM_WRITEDATA) &&
699 (fsm_state != SDEDM_FSM_WRITESTART1) &&
700 (fsm_state != SDEDM_FSM_WRITESTART2)) {
701 hsts = bcm2835_sdhost_read(host,
702 SDHSTS);
703 pr_info("%s: fsm %x, hsts %x\n",
704 mmc_hostname(host->mmc),
705 fsm_state, hsts);
706 if (hsts & SDHSTS_ERROR_MASK)
707 break;
708 }
709
710 if (time_after(jiffies, wait_max)) {
711 pr_err("%s: PIO write timeout - EDM %x\n",
712 mmc_hostname(host->mmc),
713 edm);
714 hsts = SDHSTS_REW_TIME_OUT;
715 break;
716 }
717 ndelay((burst_words - words) *
718 host->ns_per_fifo_word);
719 continue;
720 } else if (words > copy_words) {
721 words = copy_words;
722 }
723
724 copy_words -= words;
725
726 while (words) {
727 bcm2835_sdhost_write(host, *(buf++), SDDATA);
728 words--;
729 }
730 }
731
732 if (hsts & SDHSTS_ERROR_MASK)
733 break;
734 }
735
736 sg_miter_stop(&host->sg_miter);
737
738 local_irq_restore(flags);
739 }
740
741 static void bcm2835_sdhost_transfer_pio(struct bcm2835_host *host)
742 {
743 u32 sdhsts;
744 bool is_read;
745 BUG_ON(!host->data);
746 log_event("XFP<", (u32)host->data, host->blocks);
747
748 is_read = (host->data->flags & MMC_DATA_READ) != 0;
749 if (is_read)
750 bcm2835_sdhost_read_block_pio(host);
751 else
752 bcm2835_sdhost_write_block_pio(host);
753
754 sdhsts = bcm2835_sdhost_read(host, SDHSTS);
755 if (sdhsts & (SDHSTS_CRC16_ERROR |
756 SDHSTS_CRC7_ERROR |
757 SDHSTS_FIFO_ERROR)) {
758 pr_err("%s: %s transfer error - HSTS %x\n",
759 mmc_hostname(host->mmc),
760 is_read ? "read" : "write",
761 sdhsts);
762 host->data->error = -EILSEQ;
763 } else if ((sdhsts & (SDHSTS_CMD_TIME_OUT |
764 SDHSTS_REW_TIME_OUT))) {
765 pr_err("%s: %s timeout error - HSTS %x\n",
766 mmc_hostname(host->mmc),
767 is_read ? "read" : "write",
768 sdhsts);
769 host->data->error = -ETIMEDOUT;
770 }
771 log_event("XFP>", (u32)host->data, host->blocks);
772 }
773
774 static void bcm2835_sdhost_prepare_dma(struct bcm2835_host *host,
775 struct mmc_data *data)
776 {
777 int len, dir_data, dir_slave;
778 struct dma_async_tx_descriptor *desc = NULL;
779 struct dma_chan *dma_chan;
780
781 log_event("PRD<", (u32)data, 0);
782 pr_debug("bcm2835_sdhost_prepare_dma()\n");
783
784 dma_chan = host->dma_chan_rxtx;
785 if (data->flags & MMC_DATA_READ) {
786 dir_data = DMA_FROM_DEVICE;
787 dir_slave = DMA_DEV_TO_MEM;
788 } else {
789 dir_data = DMA_TO_DEVICE;
790 dir_slave = DMA_MEM_TO_DEV;
791 }
792 log_event("PRD1", (u32)dma_chan, 0);
793
794 BUG_ON(!dma_chan->device);
795 BUG_ON(!dma_chan->device->dev);
796 BUG_ON(!data->sg);
797
798 /* The block doesn't manage the FIFO DREQs properly for multi-block
799 transfers, so don't attempt to DMA the final few words.
800 Unfortunately this requires the final sg entry to be trimmed.
801 N.B. This code demands that the overspill is contained in
802 a single sg entry.
803 */
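/*
 * Worked example of the trim below, using the compile-time defaults: with
 * FIFO_READ_THRESHOLD == 4, up to (4 - 1) * 4 = 12 bytes are withheld from
 * the DMA mapping, the final sg entry is shortened by those 12 bytes and
 * drain_words becomes 3; the CPU reads those words out of SDDATA in the
 * DMA completion callback once the FIFO reports them present.
 */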
804
805 host->drain_words = 0;
806 if ((data->blocks > 1) && (dir_data == DMA_FROM_DEVICE)) {
807 struct scatterlist *sg;
808 u32 len;
809 int i;
810
811 len = min((u32)(FIFO_READ_THRESHOLD - 1) * 4,
812 (u32)data->blocks * data->blksz);
813
814 for_each_sg(data->sg, sg, data->sg_len, i) {
815 if (sg_is_last(sg)) {
816 BUG_ON(sg->length < len);
817 sg->length -= len;
818 host->drain_page = sg_page(sg);
819 host->drain_offset = sg->offset + sg->length;
820 }
821 }
822 host->drain_words = len/4;
823 }
824
825 /* The parameters have already been validated, so this will not fail */
826 (void)dmaengine_slave_config(dma_chan,
827 (dir_data == DMA_FROM_DEVICE) ?
828 &host->dma_cfg_rx :
829 &host->dma_cfg_tx);
830
831 len = dma_map_sg(dma_chan->device->dev, data->sg, data->sg_len,
832 dir_data);
833
834 log_event("PRD2", len, 0);
835 if (len > 0)
836 desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
837 len, dir_slave,
838 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
839 log_event("PRD3", (u32)desc, 0);
840
841 if (desc) {
842 desc->callback = bcm2835_sdhost_dma_complete;
843 desc->callback_param = host;
844 host->dma_desc = desc;
845 host->dma_chan = dma_chan;
846 host->dma_dir = dir_data;
847 }
848 log_event("PDM>", (u32)data, 0);
849 }
850
851 static void bcm2835_sdhost_start_dma(struct bcm2835_host *host)
852 {
853 log_event("SDMA", (u32)host->data, (u32)host->dma_chan);
854 dmaengine_submit(host->dma_desc);
855 dma_async_issue_pending(host->dma_chan);
856 }
857
858 static void bcm2835_sdhost_set_transfer_irqs(struct bcm2835_host *host)
859 {
860 u32 all_irqs = SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN |
861 SDHCFG_BUSY_IRPT_EN;
862 if (host->dma_desc)
863 host->hcfg = (host->hcfg & ~all_irqs) |
864 SDHCFG_BUSY_IRPT_EN;
865 else
866 host->hcfg = (host->hcfg & ~all_irqs) |
867 SDHCFG_DATA_IRPT_EN |
868 SDHCFG_BUSY_IRPT_EN;
869
870 bcm2835_sdhost_write(host, host->hcfg, SDHCFG);
871 }
872
873 static void bcm2835_sdhost_prepare_data(struct bcm2835_host *host, struct mmc_command *cmd)
874 {
875 struct mmc_data *data = cmd->data;
876
877 WARN_ON(host->data);
878
879 host->data = data;
880 if (!data)
881 return;
882
883 /* Sanity checks */
884 BUG_ON(data->blksz * data->blocks > 524288);
885 BUG_ON(data->blksz > host->mmc->max_blk_size);
886 BUG_ON(data->blocks > 65535);
887
888 host->data_complete = 0;
889 host->flush_fifo = 0;
890 host->data->bytes_xfered = 0;
891
892 if (!host->sectors && host->mmc->card) {
893 struct mmc_card *card = host->mmc->card;
894 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
895 /*
896 * The EXT_CSD sector count is in number of 512 byte
897 * sectors.
898 */
899 host->sectors = card->ext_csd.sectors;
900 } else {
901 /*
902 * The CSD capacity field is in units of read_blkbits.
903 * set_capacity takes units of 512 bytes.
904 */
905 host->sectors = card->csd.capacity <<
906 (card->csd.read_blkbits - 9);
907 }
908 }
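/*
 * Illustration of the CSD path above (not tied to any particular card): a
 * CSD reporting capacity C in read_blkbits = 10 (1 KB) units gives
 * C << (10 - 9) = 2 * C sectors of 512 bytes; host->sectors is only used
 * by the read-hazard check in send_command below.
 */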
909
910 if (!host->dma_desc) {
911 /* Use PIO */
912 int flags = SG_MITER_ATOMIC;
913
914 if (data->flags & MMC_DATA_READ)
915 flags |= SG_MITER_TO_SG;
916 else
917 flags |= SG_MITER_FROM_SG;
918 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
919 host->blocks = data->blocks;
920 }
921
922 bcm2835_sdhost_set_transfer_irqs(host);
923
924 bcm2835_sdhost_write(host, data->blksz, SDHBCT);
925 bcm2835_sdhost_write(host, data->blocks, SDHBLC);
926
927 BUG_ON(!host->data);
928 }
929
930 bool bcm2835_sdhost_send_command(struct bcm2835_host *host,
931 struct mmc_command *cmd)
932 {
933 u32 sdcmd, sdhsts;
934 unsigned long timeout;
935 int delay;
936
937 WARN_ON(host->cmd);
938 log_event("CMD<", cmd->opcode, cmd->arg);
939
940 if (cmd->data)
941 pr_debug("%s: send_command %d 0x%x "
942 "(flags 0x%x) - %s %d*%d\n",
943 mmc_hostname(host->mmc),
944 cmd->opcode, cmd->arg, cmd->flags,
945 (cmd->data->flags & MMC_DATA_READ) ?
946 "read" : "write", cmd->data->blocks,
947 cmd->data->blksz);
948 else
949 pr_debug("%s: send_command %d 0x%x (flags 0x%x)\n",
950 mmc_hostname(host->mmc),
951 cmd->opcode, cmd->arg, cmd->flags);
952
953 /* Wait max 100 ms */
954 timeout = 10000;
955
956 while (bcm2835_sdhost_read(host, SDCMD) & SDCMD_NEW_FLAG) {
957 if (timeout == 0) {
958 pr_warn("%s: previous command never completed.\n",
959 mmc_hostname(host->mmc));
960 if (host->debug)
961 bcm2835_sdhost_dumpregs(host);
962 cmd->error = -EILSEQ;
963 tasklet_schedule(&host->finish_tasklet);
964 return false;
965 }
966 timeout--;
967 udelay(10);
968 }
969
970 delay = (10000 - timeout)/100;
971 if (delay > host->max_delay) {
972 host->max_delay = delay;
pr_warn("%s: controller hung for %d ms\n",
974 mmc_hostname(host->mmc),
975 host->max_delay);
976 }
977
978 timeout = jiffies;
979 if (!cmd->data && cmd->busy_timeout > 9000)
980 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
981 else
982 timeout += 10 * HZ;
983 mod_timer(&host->timer, timeout);
984
985 host->cmd = cmd;
986
987 /* Clear any error flags */
988 sdhsts = bcm2835_sdhost_read(host, SDHSTS);
989 if (sdhsts & SDHSTS_ERROR_MASK)
990 bcm2835_sdhost_write(host, sdhsts, SDHSTS);
991
992 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
993 pr_err("%s: unsupported response type!\n",
994 mmc_hostname(host->mmc));
995 cmd->error = -EINVAL;
996 tasklet_schedule(&host->finish_tasklet);
997 return false;
998 }
999
1000 bcm2835_sdhost_prepare_data(host, cmd);
1001
1002 bcm2835_sdhost_write(host, cmd->arg, SDARG);
1003
1004 sdcmd = cmd->opcode & SDCMD_CMD_MASK;
1005
1006 host->use_busy = 0;
1007 if (!(cmd->flags & MMC_RSP_PRESENT)) {
1008 sdcmd |= SDCMD_NO_RESPONSE;
1009 } else {
1010 if (cmd->flags & MMC_RSP_136)
1011 sdcmd |= SDCMD_LONG_RESPONSE;
1012 if (cmd->flags & MMC_RSP_BUSY) {
1013 sdcmd |= SDCMD_BUSYWAIT;
1014 host->use_busy = 1;
1015 }
1016 }
1017
1018 if (cmd->data) {
1019 log_event("CMDD", cmd->data->blocks, cmd->data->blksz);
1020 if (host->delay_after_this_stop) {
1021 struct timeval now;
1022 int time_since_stop;
1023 do_gettimeofday(&now);
1024 time_since_stop = (now.tv_sec - host->stop_time.tv_sec);
1025 if (time_since_stop < 2) {
1026 /* Possibly less than one second */
1027 time_since_stop = time_since_stop * 1000000 +
1028 (now.tv_usec - host->stop_time.tv_usec);
1029 if (time_since_stop <
1030 host->delay_after_this_stop)
1031 udelay(host->delay_after_this_stop -
1032 time_since_stop);
1033 }
1034 }
1035
1036 host->delay_after_this_stop = host->delay_after_stop;
1037 if ((cmd->data->flags & MMC_DATA_READ) && !host->use_sbc) {
1038 /* See if read crosses one of the hazardous sectors */
1039 u32 first_blk, last_blk;
1040
1041 /* Intentionally include the following sector because
1042 without CMD23/SBC the read may run on. */
1043 first_blk = host->mrq->cmd->arg;
1044 last_blk = first_blk + cmd->data->blocks;
1045
1046 if (((last_blk >= (host->sectors - 64)) &&
1047 (first_blk <= (host->sectors - 64))) ||
1048 ((last_blk >= (host->sectors - 32)) &&
1049 (first_blk <= (host->sectors - 32)))) {
1050 host->delay_after_this_stop =
1051 max(250u, host->delay_after_stop);
1052 }
1053 }
1054
1055 if (cmd->data->flags & MMC_DATA_WRITE)
1056 sdcmd |= SDCMD_WRITE_CMD;
1057 if (cmd->data->flags & MMC_DATA_READ)
1058 sdcmd |= SDCMD_READ_CMD;
1059 }
1060
1061 bcm2835_sdhost_write(host, sdcmd | SDCMD_NEW_FLAG, SDCMD);
1062
1063 return true;
1064 }
1065
1066 static void bcm2835_sdhost_finish_command(struct bcm2835_host *host,
1067 unsigned long *irq_flags);
1068 static void bcm2835_sdhost_transfer_complete(struct bcm2835_host *host);
1069
1070 static void bcm2835_sdhost_finish_data(struct bcm2835_host *host)
1071 {
1072 struct mmc_data *data;
1073
1074 data = host->data;
1075 BUG_ON(!data);
1076
1077 log_event("FDA<", (u32)host->mrq, (u32)host->cmd);
1078 pr_debug("finish_data(error %d, stop %d, sbc %d)\n",
1079 data->error, data->stop ? 1 : 0,
1080 host->mrq->sbc ? 1 : 0);
1081
1082 host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN);
1083 bcm2835_sdhost_write(host, host->hcfg, SDHCFG);
1084
1085 data->bytes_xfered = data->error ? 0 : (data->blksz * data->blocks);
1086
1087 host->data_complete = 1;
1088
1089 if (host->cmd) {
1090 /*
1091 * Data managed to finish before the
1092 * command completed. Make sure we do
1093 * things in the proper order.
1094 */
1095 pr_debug("Finished early - HSTS %x\n",
1096 bcm2835_sdhost_read(host, SDHSTS));
1097 }
1098 else
1099 bcm2835_sdhost_transfer_complete(host);
1100 log_event("FDA>", (u32)host->mrq, (u32)host->cmd);
1101 }
1102
1103 static void bcm2835_sdhost_transfer_complete(struct bcm2835_host *host)
1104 {
1105 struct mmc_data *data;
1106
1107 BUG_ON(host->cmd);
1108 BUG_ON(!host->data);
1109 BUG_ON(!host->data_complete);
1110
1111 data = host->data;
1112 host->data = NULL;
1113
1114 log_event("TCM<", (u32)data, data->error);
1115 pr_debug("transfer_complete(error %d, stop %d)\n",
1116 data->error, data->stop ? 1 : 0);
1117
1118 /*
1119 * Need to send CMD12 if -
1120 * a) open-ended multiblock transfer (no CMD23)
1121 * b) error in multiblock transfer
1122 */
1123 if (host->mrq->stop && (data->error || !host->use_sbc)) {
1124 if (bcm2835_sdhost_send_command(host, host->mrq->stop)) {
1125 /* No busy, so poll for completion */
1126 if (!host->use_busy)
1127 bcm2835_sdhost_finish_command(host, NULL);
1128
1129 if (host->delay_after_this_stop)
1130 do_gettimeofday(&host->stop_time);
1131 }
1132 } else {
1133 bcm2835_sdhost_wait_transfer_complete(host);
1134 tasklet_schedule(&host->finish_tasklet);
1135 }
1136 log_event("TCM>", (u32)data, 0);
1137 }
1138
1139 /* If irq_flags is valid, the caller is in a thread context and is allowed
1140 to sleep */
1141 static void bcm2835_sdhost_finish_command(struct bcm2835_host *host,
1142 unsigned long *irq_flags)
1143 {
1144 u32 sdcmd;
1145 u32 retries;
1146 #ifdef DEBUG
1147 struct timeval before, after;
1148 int timediff = 0;
1149 #endif
1150
1151 log_event("FCM<", (u32)host->mrq, (u32)host->cmd);
1152 pr_debug("finish_command(%x)\n", bcm2835_sdhost_read(host, SDCMD));
1153
1154 BUG_ON(!host->cmd || !host->mrq);
1155
1156 /* Poll quickly at first */
1157
1158 retries = host->cmd_quick_poll_retries;
1159 if (!retries) {
1160 /* Work out how many polls take 1us by timing 10us */
1161 struct timeval start, now;
1162 int us_diff;
1163
1164 retries = 1;
1165 do {
1166 int i;
1167
1168 retries *= 2;
1169
1170 do_gettimeofday(&start);
1171
1172 for (i = 0; i < retries; i++) {
1173 cpu_relax();
1174 sdcmd = bcm2835_sdhost_read(host, SDCMD);
1175 }
1176
1177 do_gettimeofday(&now);
1178 us_diff = (now.tv_sec - start.tv_sec) * 1000000 +
1179 (now.tv_usec - start.tv_usec);
1180 } while (us_diff < 10);
1181
1182 host->cmd_quick_poll_retries = ((retries * us_diff + 9)*CMD_DALLY_US)/10 + 1;
retries = 1; /* We've already waited long enough this time */
1184 }
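/*
 * Calibration example (numbers purely illustrative): if the doubling loop
 * exits with retries = 256 reads taking us_diff = 11 us, then
 * cmd_quick_poll_retries = ((256 * 11 + 9) * CMD_DALLY_US) / 10 + 1 = 283,
 * i.e. roughly that many quick polls are attempted below before falling
 * back to the sleeping wait or the deferred work item.
 */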
1185
1186 for (sdcmd = bcm2835_sdhost_read(host, SDCMD);
1187 (sdcmd & SDCMD_NEW_FLAG) && retries;
1188 retries--) {
1189 cpu_relax();
1190 sdcmd = bcm2835_sdhost_read(host, SDCMD);
1191 }
1192
1193 if (!retries) {
1194 unsigned long wait_max;
1195
1196 if (!irq_flags) {
1197 /* Schedule the work */
1198 log_event("CWWQ", 0, 0);
1199 schedule_work(&host->cmd_wait_wq);
1200 return;
1201 }
1202
1203 /* Wait max 100 ms */
1204 wait_max = jiffies + msecs_to_jiffies(100);
1205 while (time_before(jiffies, wait_max)) {
1206 spin_unlock_irqrestore(&host->lock, *irq_flags);
1207 usleep_range(1, 10);
1208 spin_lock_irqsave(&host->lock, *irq_flags);
1209 sdcmd = bcm2835_sdhost_read(host, SDCMD);
1210 if (!(sdcmd & SDCMD_NEW_FLAG))
1211 break;
1212 }
1213 }
1214
1215 /* Check for errors */
1216 if (sdcmd & SDCMD_NEW_FLAG) {
1217 if (host->debug) {
1218 pr_err("%s: command %d never completed.\n",
1219 mmc_hostname(host->mmc), host->cmd->opcode);
1220 bcm2835_sdhost_dumpregs(host);
1221 }
1222 host->cmd->error = -EILSEQ;
1223 tasklet_schedule(&host->finish_tasklet);
1224 return;
1225 } else if (sdcmd & SDCMD_FAIL_FLAG) {
1226 u32 sdhsts = bcm2835_sdhost_read(host, SDHSTS);
1227
1228 /* Clear the errors */
1229 bcm2835_sdhost_write(host, SDHSTS_ERROR_MASK, SDHSTS);
1230
1231 if (host->debug)
1232 pr_info("%s: error detected - CMD %x, HSTS %03x, EDM %x\n",
1233 mmc_hostname(host->mmc), sdcmd, sdhsts,
1234 bcm2835_sdhost_read(host, SDEDM));
1235
1236 if ((sdhsts & SDHSTS_CRC7_ERROR) &&
    (host->cmd->opcode == MMC_SEND_OP_COND)) {
1238 if (host->debug)
1239 pr_info("%s: ignoring CRC7 error for CMD1\n",
1240 mmc_hostname(host->mmc));
1241 } else {
1242 if (sdhsts & SDHSTS_CMD_TIME_OUT) {
1243 if (host->debug)
1244 pr_warn("%s: command %d timeout\n",
1245 mmc_hostname(host->mmc),
1246 host->cmd->opcode);
1247 host->cmd->error = -ETIMEDOUT;
1248 } else {
1249 pr_warn("%s: unexpected command %d error\n",
1250 mmc_hostname(host->mmc),
1251 host->cmd->opcode);
1252 host->cmd->error = -EILSEQ;
1253 }
1254 tasklet_schedule(&host->finish_tasklet);
1255 return;
1256 }
1257 }
1258
1259 if (host->cmd->flags & MMC_RSP_PRESENT) {
1260 if (host->cmd->flags & MMC_RSP_136) {
1261 int i;
1262 for (i = 0; i < 4; i++)
1263 host->cmd->resp[3 - i] = bcm2835_sdhost_read(host, SDRSP0 + i*4);
1264 pr_debug("%s: finish_command %08x %08x %08x %08x\n",
1265 mmc_hostname(host->mmc),
1266 host->cmd->resp[0], host->cmd->resp[1], host->cmd->resp[2], host->cmd->resp[3]);
1267 log_event("RSP ", host->cmd->resp[0], host->cmd->resp[1]);
1268 } else {
1269 host->cmd->resp[0] = bcm2835_sdhost_read(host, SDRSP0);
1270 pr_debug("%s: finish_command %08x\n",
1271 mmc_hostname(host->mmc),
1272 host->cmd->resp[0]);
1273 log_event("RSP ", host->cmd->resp[0], 0);
1274 }
1275 }
1276
1277 if (host->cmd == host->mrq->sbc) {
1278 /* Finished CMD23, now send actual command. */
1279 host->cmd = NULL;
1280 if (bcm2835_sdhost_send_command(host, host->mrq->cmd)) {
1281 if (host->data && host->dma_desc)
1282 /* DMA transfer starts now, PIO starts after irq */
1283 bcm2835_sdhost_start_dma(host);
1284
1285 if (!host->use_busy)
1286 bcm2835_sdhost_finish_command(host, NULL);
1287 }
1288 } else if (host->cmd == host->mrq->stop) {
1289 /* Finished CMD12 */
1290 tasklet_schedule(&host->finish_tasklet);
1291 } else {
1292 /* Processed actual command. */
1293 host->cmd = NULL;
1294 if (!host->data)
1295 tasklet_schedule(&host->finish_tasklet);
1296 else if (host->data_complete)
1297 bcm2835_sdhost_transfer_complete(host);
1298 }
1299 log_event("FCM>", (u32)host->mrq, (u32)host->cmd);
1300 }
1301
1302 static void bcm2835_sdhost_timeout(unsigned long data)
1303 {
1304 struct bcm2835_host *host;
1305 unsigned long flags;
1306
1307 host = (struct bcm2835_host *)data;
1308
1309 spin_lock_irqsave(&host->lock, flags);
1310 log_event("TIM<", 0, 0);
1311
1312 if (host->mrq) {
1313 pr_err("%s: timeout waiting for hardware interrupt.\n",
1314 mmc_hostname(host->mmc));
1315 log_dump();
1316 bcm2835_sdhost_dumpregs(host);
1317
1318 if (host->data) {
1319 host->data->error = -ETIMEDOUT;
1320 bcm2835_sdhost_finish_data(host);
1321 } else {
1322 if (host->cmd)
1323 host->cmd->error = -ETIMEDOUT;
1324 else
1325 host->mrq->cmd->error = -ETIMEDOUT;
1326
1327 pr_debug("timeout_timer tasklet_schedule\n");
1328 tasklet_schedule(&host->finish_tasklet);
1329 }
1330 }
1331
1332 mmiowb();
1333 spin_unlock_irqrestore(&host->lock, flags);
1334 }
1335
1336 static void bcm2835_sdhost_busy_irq(struct bcm2835_host *host, u32 intmask)
1337 {
1338 log_event("IRQB", (u32)host->cmd, intmask);
1339 if (!host->cmd) {
1340 pr_err("%s: got command busy interrupt 0x%08x even "
1341 "though no command operation was in progress.\n",
1342 mmc_hostname(host->mmc), (unsigned)intmask);
1343 bcm2835_sdhost_dumpregs(host);
1344 return;
1345 }
1346
1347 if (!host->use_busy) {
1348 pr_err("%s: got command busy interrupt 0x%08x even "
1349 "though not expecting one.\n",
1350 mmc_hostname(host->mmc), (unsigned)intmask);
1351 bcm2835_sdhost_dumpregs(host);
1352 return;
1353 }
1354 host->use_busy = 0;
1355
1356 if (intmask & SDHSTS_ERROR_MASK)
1357 {
1358 pr_err("sdhost_busy_irq: intmask %x, data %p\n", intmask, host->mrq->data);
1359 if (intmask & SDHSTS_CRC7_ERROR)
1360 host->cmd->error = -EILSEQ;
1361 else if (intmask & (SDHSTS_CRC16_ERROR |
1362 SDHSTS_FIFO_ERROR)) {
1363 if (host->mrq->data)
1364 host->mrq->data->error = -EILSEQ;
1365 else
1366 host->cmd->error = -EILSEQ;
1367 } else if (intmask & SDHSTS_REW_TIME_OUT) {
1368 if (host->mrq->data)
1369 host->mrq->data->error = -ETIMEDOUT;
1370 else
1371 host->cmd->error = -ETIMEDOUT;
1372 } else if (intmask & SDHSTS_CMD_TIME_OUT)
1373 host->cmd->error = -ETIMEDOUT;
1374
1375 if (host->debug) {
1376 log_dump();
1377 bcm2835_sdhost_dumpregs(host);
1378 }
1379 }
1380 else
1381 bcm2835_sdhost_finish_command(host, NULL);
1382 }
1383
1384 static void bcm2835_sdhost_data_irq(struct bcm2835_host *host, u32 intmask)
1385 {
1386 /* There are no dedicated data/space available interrupt
1387 status bits, so it is necessary to use the single shared
1388 data/space available FIFO status bits. It is therefore not
1389 an error to get here when there is no data transfer in
1390 progress. */
1391 log_event("IRQD", (u32)host->data, intmask);
1392 if (!host->data)
1393 return;
1394
1395 if (intmask & (SDHSTS_CRC16_ERROR |
1396 SDHSTS_FIFO_ERROR |
1397 SDHSTS_REW_TIME_OUT)) {
1398 if (intmask & (SDHSTS_CRC16_ERROR |
1399 SDHSTS_FIFO_ERROR))
1400 host->data->error = -EILSEQ;
1401 else
1402 host->data->error = -ETIMEDOUT;
1403
1404 if (host->debug) {
1405 log_dump();
1406 bcm2835_sdhost_dumpregs(host);
1407 }
1408 }
1409
1410 if (host->data->error) {
1411 bcm2835_sdhost_finish_data(host);
1412 } else if (host->data->flags & MMC_DATA_WRITE) {
1413 /* Use the block interrupt for writes after the first block */
1414 host->hcfg &= ~(SDHCFG_DATA_IRPT_EN);
1415 host->hcfg |= SDHCFG_BLOCK_IRPT_EN;
1416 bcm2835_sdhost_write(host, host->hcfg, SDHCFG);
1417 bcm2835_sdhost_transfer_pio(host);
1418 } else {
1419 bcm2835_sdhost_transfer_pio(host);
1420 host->blocks--;
1421 if ((host->blocks == 0) || host->data->error)
1422 bcm2835_sdhost_finish_data(host);
1423 }
1424 }
1425
1426 static void bcm2835_sdhost_block_irq(struct bcm2835_host *host, u32 intmask)
1427 {
1428 log_event("IRQK", (u32)host->data, intmask);
1429 if (!host->data) {
1430 pr_err("%s: got block interrupt 0x%08x even "
1431 "though no data operation was in progress.\n",
1432 mmc_hostname(host->mmc), (unsigned)intmask);
1433 bcm2835_sdhost_dumpregs(host);
1434 return;
1435 }
1436
1437 if (intmask & (SDHSTS_CRC16_ERROR |
1438 SDHSTS_FIFO_ERROR |
1439 SDHSTS_REW_TIME_OUT)) {
1440 if (intmask & (SDHSTS_CRC16_ERROR |
1441 SDHSTS_FIFO_ERROR))
1442 host->data->error = -EILSEQ;
1443 else
1444 host->data->error = -ETIMEDOUT;
1445
1446 if (host->debug) {
1447 log_dump();
1448 bcm2835_sdhost_dumpregs(host);
1449 }
1450 }
1451
1452 if (!host->dma_desc) {
1453 BUG_ON(!host->blocks);
1454 if (host->data->error || (--host->blocks == 0)) {
1455 bcm2835_sdhost_finish_data(host);
1456 } else {
1457 bcm2835_sdhost_transfer_pio(host);
1458 }
1459 } else if (host->data->flags & MMC_DATA_WRITE) {
1460 bcm2835_sdhost_finish_data(host);
1461 }
1462 }
1463
1464 static irqreturn_t bcm2835_sdhost_irq(int irq, void *dev_id)
1465 {
1466 irqreturn_t result = IRQ_NONE;
1467 struct bcm2835_host *host = dev_id;
1468 u32 intmask;
1469
1470 spin_lock(&host->lock);
1471
1472 intmask = bcm2835_sdhost_read(host, SDHSTS);
1473 log_event("IRQ<", intmask, 0);
1474
1475 bcm2835_sdhost_write(host,
1476 SDHSTS_BUSY_IRPT |
1477 SDHSTS_BLOCK_IRPT |
1478 SDHSTS_SDIO_IRPT |
1479 SDHSTS_DATA_FLAG,
1480 SDHSTS);
1481
1482 if (intmask & SDHSTS_BLOCK_IRPT) {
1483 bcm2835_sdhost_block_irq(host, intmask);
1484 result = IRQ_HANDLED;
1485 }
1486
1487 if (intmask & SDHSTS_BUSY_IRPT) {
1488 bcm2835_sdhost_busy_irq(host, intmask);
1489 result = IRQ_HANDLED;
1490 }
1491
1492 /* There is no true data interrupt status bit, so it is
1493 necessary to qualify the data flag with the interrupt
1494 enable bit */
1495 if ((intmask & SDHSTS_DATA_FLAG) &&
1496 (host->hcfg & SDHCFG_DATA_IRPT_EN)) {
1497 bcm2835_sdhost_data_irq(host, intmask);
1498 result = IRQ_HANDLED;
1499 }
1500
1501 mmiowb();
1502
1503 log_event("IRQ>", bcm2835_sdhost_read(host, SDHSTS), 0);
1504 spin_unlock(&host->lock);
1505
1506 return result;
1507 }
1508
1509 void bcm2835_sdhost_set_clock(struct bcm2835_host *host, unsigned int clock)
1510 {
1511 int div = 0; /* Initialized for compiler warning */
1512 unsigned int input_clock = clock;
1513 unsigned long flags;
1514
1515 if (host->debug)
1516 pr_info("%s: set_clock(%d)\n", mmc_hostname(host->mmc), clock);
1517
1518 if ((host->overclock_50 > 50) &&
1519 (clock == 50*MHZ))
1520 clock = host->overclock_50 * MHZ + (MHZ - 1);
1521
1522 /* The SDCDIV register has 11 bits, and holds (div - 2).
   But in data mode the max is 50MHz without a minimum, and only the
1524 bottom 3 bits are used. Since the switch over is automatic (unless
1525 we have marked the card as slow...), chosen values have to make
1526 sense in both modes.
1527 Ident mode must be 100-400KHz, so can range check the requested
1528 clock. CMD15 must be used to return to data mode, so this can be
1529 monitored.
1530
1531 clock 250MHz -> 0->125MHz, 1->83.3MHz, 2->62.5MHz, 3->50.0MHz
1532 4->41.7MHz, 5->35.7MHz, 6->31.3MHz, 7->27.8MHz
1533
1534 623->400KHz/27.8MHz
1535 reset value (507)->491159/50MHz
1536
1537 BUT, the 3-bit clock divisor in data mode is too small if the
1538 core clock is higher than 250MHz, so instead use the SLOW_CARD
1539 configuration bit to force the use of the ident clock divisor
1540 at all times.
1541 */
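/*
 * Worked example for the non-firmware path below (figures illustrative):
 * with max_clk = 250 MHz and a requested 25 MHz, div = 250/25 = 10, no
 * round-up is needed (250/10 == 25), div -= 2 leaves SDCDIV = 8 and the
 * actual clock is 250 MHz / (8 + 2) = 25 MHz. An overclock_50 of 62 is
 * requested as 62999999 Hz above, so the integer arithmetic rounds down
 * and settles on 250 MHz / 4 = 62.5 MHz.
 */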
1542
1543 host->mmc->actual_clock = 0;
1544
1545 if (host->firmware_sets_cdiv) {
1546 u32 msg[3] = { clock, 0, 0 };
1547
1548 rpi_firmware_property(rpi_firmware_get(NULL),
1549 RPI_FIRMWARE_SET_SDHOST_CLOCK,
1550 &msg, sizeof(msg));
1551
1552 clock = max(msg[1], msg[2]);
1553 spin_lock_irqsave(&host->lock, flags);
1554 } else {
1555 spin_lock_irqsave(&host->lock, flags);
1556 if (clock < 100000) {
1557 /* Can't stop the clock, but make it as slow as
1558 * possible to show willing
1559 */
1560 host->cdiv = SDCDIV_MAX_CDIV;
1561 bcm2835_sdhost_write(host, host->cdiv, SDCDIV);
1562 mmiowb();
1563 spin_unlock_irqrestore(&host->lock, flags);
1564 return;
1565 }
1566
1567 div = host->max_clk / clock;
1568 if (div < 2)
1569 div = 2;
1570 if ((host->max_clk / div) > clock)
1571 div++;
1572 div -= 2;
1573
1574 if (div > SDCDIV_MAX_CDIV)
1575 div = SDCDIV_MAX_CDIV;
1576
1577 clock = host->max_clk / (div + 2);
1578
1579 host->cdiv = div;
1580 bcm2835_sdhost_write(host, host->cdiv, SDCDIV);
1581
1582 if (host->debug)
1583 pr_info("%s: clock=%d -> max_clk=%d, cdiv=%x "
1584 "(actual clock %d)\n",
1585 mmc_hostname(host->mmc), input_clock,
1586 host->max_clk, host->cdiv,
1587 clock);
1588 }
1589
1590 /* Calibrate some delays */
1591
1592 host->ns_per_fifo_word = (1000000000/clock) *
1593 ((host->mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32);
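/*
 * E.g. at 50 MHz on a 4-bit bus a 32-bit FIFO word needs 8 bit-clock
 * cycles, so ns_per_fifo_word = (1000000000 / 50000000) * 8 = 160 ns
 * (32 cycles, 640 ns, on a 1-bit bus); the PIO loops use this to pace
 * their ndelay() while waiting for FIFO space or data.
 */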
1594
1595 if (input_clock == 50 * MHZ) {
1596 if (clock > input_clock) {
1597 /* Save the closest value, to make it easier
1598 to reduce in the event of error */
1599 host->overclock_50 = (clock/MHZ);
1600
1601 if (clock != host->overclock) {
1602 pr_info("%s: overclocking to %dHz\n",
1603 mmc_hostname(host->mmc), clock);
1604 host->overclock = clock;
1605 }
1606 } else if (host->overclock) {
1607 host->overclock = 0;
1608 if (clock == 50 * MHZ)
1609 pr_warn("%s: cancelling overclock\n",
1610 mmc_hostname(host->mmc));
1611 }
1612 } else if (input_clock == 0) {
1613 /* Reset the preferred overclock when the clock is stopped.
1614 * This always happens during initialisation. */
1615 host->overclock_50 = host->user_overclock_50;
1616 host->overclock = 0;
1617 }
1618
1619 /* Set the timeout to 500ms */
1620 bcm2835_sdhost_write(host, clock/2, SDTOUT);
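/*
 * E.g. with an actual clock of 50 MHz this writes 25000000 SD clock
 * cycles into SDTOUT, which is the advertised 500 ms.
 */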
1621
1622 host->mmc->actual_clock = clock;
1623 host->clock = input_clock;
1624 host->reset_clock = 0;
1625
1626 mmiowb();
1627 spin_unlock_irqrestore(&host->lock, flags);
1628 }
1629
1630 static void bcm2835_sdhost_request(struct mmc_host *mmc, struct mmc_request *mrq)
1631 {
1632 struct bcm2835_host *host;
1633 unsigned long flags;
1634 u32 edm, fsm;
1635
1636 host = mmc_priv(mmc);
1637
1638 if (host->debug) {
1639 struct mmc_command *cmd = mrq->cmd;
1640 BUG_ON(!cmd);
1641 if (cmd->data)
1642 pr_info("%s: cmd %d 0x%x (flags 0x%x) - %s %d*%d\n",
1643 mmc_hostname(mmc),
1644 cmd->opcode, cmd->arg, cmd->flags,
1645 (cmd->data->flags & MMC_DATA_READ) ?
1646 "read" : "write", cmd->data->blocks,
1647 cmd->data->blksz);
1648 else
1649 pr_info("%s: cmd %d 0x%x (flags 0x%x)\n",
1650 mmc_hostname(mmc),
1651 cmd->opcode, cmd->arg, cmd->flags);
1652 }
1653
1654 /* Reset the error statuses in case this is a retry */
1655 if (mrq->sbc)
1656 mrq->sbc->error = 0;
1657 if (mrq->cmd)
1658 mrq->cmd->error = 0;
1659 if (mrq->data)
1660 mrq->data->error = 0;
1661 if (mrq->stop)
1662 mrq->stop->error = 0;
1663
1664 if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
1665 pr_err("%s: unsupported block size (%d bytes)\n",
1666 mmc_hostname(mmc), mrq->data->blksz);
1667 mrq->cmd->error = -EINVAL;
1668 mmc_request_done(mmc, mrq);
1669 return;
1670 }
1671
1672 if (host->use_dma && mrq->data &&
1673 (mrq->data->blocks > host->pio_limit))
1674 bcm2835_sdhost_prepare_dma(host, mrq->data);
1675
1676 if (host->reset_clock)
1677 bcm2835_sdhost_set_clock(host, host->clock);
1678
1679 spin_lock_irqsave(&host->lock, flags);
1680
1681 WARN_ON(host->mrq != NULL);
1682 host->mrq = mrq;
1683
1684 edm = bcm2835_sdhost_read(host, SDEDM);
1685 fsm = edm & SDEDM_FSM_MASK;
1686
1687 log_event("REQ<", (u32)mrq, edm);
1688 if ((fsm != SDEDM_FSM_IDENTMODE) &&
1689 (fsm != SDEDM_FSM_DATAMODE)) {
1690 log_event("REQ!", (u32)mrq, edm);
1691 if (host->debug) {
1692 pr_warn("%s: previous command (%d) not complete (EDM %x)\n",
1693 mmc_hostname(host->mmc),
1694 bcm2835_sdhost_read(host, SDCMD) & SDCMD_CMD_MASK,
1695 edm);
1696 log_dump();
1697 bcm2835_sdhost_dumpregs(host);
1698 }
1699 mrq->cmd->error = -EILSEQ;
1700 tasklet_schedule(&host->finish_tasklet);
1701 mmiowb();
1702 spin_unlock_irqrestore(&host->lock, flags);
1703 return;
1704 }
1705
1706 host->use_sbc = !!mrq->sbc &&
1707 (host->mrq->data->flags & USE_CMD23_FLAGS);
1708 if (host->use_sbc) {
1709 if (bcm2835_sdhost_send_command(host, mrq->sbc)) {
1710 if (!host->use_busy)
1711 bcm2835_sdhost_finish_command(host, &flags);
1712 }
1713 } else if (bcm2835_sdhost_send_command(host, mrq->cmd)) {
1714 if (host->data && host->dma_desc)
1715 /* DMA transfer starts now, PIO starts after irq */
1716 bcm2835_sdhost_start_dma(host);
1717
1718 if (!host->use_busy)
1719 bcm2835_sdhost_finish_command(host, &flags);
1720 }
1721
1722 log_event("CMD ", (u32)mrq->cmd->opcode,
1723 mrq->data ? (u32)mrq->data->blksz : 0);
1724 mmiowb();
1725
1726 log_event("REQ>", (u32)mrq, 0);
1727 spin_unlock_irqrestore(&host->lock, flags);
1728 }
1729
1730 static void bcm2835_sdhost_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1731 {
1732
1733 struct bcm2835_host *host = mmc_priv(mmc);
1734 unsigned long flags;
1735
1736 if (host->debug)
1737 pr_info("%s: ios clock %d, pwr %d, bus_width %d, "
1738 "timing %d, vdd %d, drv_type %d\n",
1739 mmc_hostname(mmc),
1740 ios->clock, ios->power_mode, ios->bus_width,
1741 ios->timing, ios->signal_voltage, ios->drv_type);
1742
1743 spin_lock_irqsave(&host->lock, flags);
1744
1745 log_event("IOS<", ios->clock, 0);
1746
1747 /* set bus width */
1748 host->hcfg &= ~SDHCFG_WIDE_EXT_BUS;
1749 if (ios->bus_width == MMC_BUS_WIDTH_4)
1750 host->hcfg |= SDHCFG_WIDE_EXT_BUS;
1751
1752 host->hcfg |= SDHCFG_WIDE_INT_BUS;
1753
1754 /* Disable clever clock switching, to cope with fast core clocks */
1755 host->hcfg |= SDHCFG_SLOW_CARD;
1756
1757 bcm2835_sdhost_write(host, host->hcfg, SDHCFG);
1758
1759 mmiowb();
1760
1761 spin_unlock_irqrestore(&host->lock, flags);
1762
1763 if (!ios->clock || ios->clock != host->clock)
1764 bcm2835_sdhost_set_clock(host, ios->clock);
1765 }
1766
1767 static struct mmc_host_ops bcm2835_sdhost_ops = {
1768 .request = bcm2835_sdhost_request,
1769 .set_ios = bcm2835_sdhost_set_ios,
1770 .hw_reset = bcm2835_sdhost_reset,
1771 };
1772
1773 static void bcm2835_sdhost_cmd_wait_work(struct work_struct *work)
1774 {
1775 struct bcm2835_host *host;
1776 unsigned long flags;
1777
1778 host = container_of(work, struct bcm2835_host, cmd_wait_wq);
1779
1780 spin_lock_irqsave(&host->lock, flags);
1781
1782 log_event("CWK<", (u32)host->cmd, (u32)host->mrq);
1783
1784 /*
 * If this work item gets rescheduled while running, it will
1786 * be run again afterwards but without any active request.
1787 */
1788 if (!host->mrq) {
1789 spin_unlock_irqrestore(&host->lock, flags);
1790 return;
1791 }
1792
1793 bcm2835_sdhost_finish_command(host, &flags);
1794
1795 mmiowb();
1796
1797 log_event("CWK>", (u32)host->cmd, 0);
1798
1799 spin_unlock_irqrestore(&host->lock, flags);
1800 }
1801
1802 static void bcm2835_sdhost_tasklet_finish(unsigned long param)
1803 {
1804 struct bcm2835_host *host;
1805 unsigned long flags;
1806 struct mmc_request *mrq;
1807 struct dma_chan *terminate_chan = NULL;
1808
1809 host = (struct bcm2835_host *)param;
1810
1811 spin_lock_irqsave(&host->lock, flags);
1812
1813 log_event("TSK<", (u32)host->mrq, 0);
1814 /*
1815 * If this tasklet gets rescheduled while running, it will
1816 * be run again afterwards but without any active request.
1817 */
1818 if (!host->mrq) {
1819 spin_unlock_irqrestore(&host->lock, flags);
1820 return;
1821 }
1822
1823 del_timer(&host->timer);
1824
1825 mrq = host->mrq;
1826
1827 /* Drop the overclock after any data corruption, or after any
1828 * error while overclocked. Ignore errors for status commands,
1829 * as they are likely when a card is ejected. */
1830 if (host->overclock) {
1831 if ((mrq->cmd && mrq->cmd->error &&
1832 (mrq->cmd->opcode != MMC_SEND_STATUS)) ||
1833 (mrq->data && mrq->data->error) ||
1834 (mrq->stop && mrq->stop->error) ||
1835 (mrq->sbc && mrq->sbc->error)) {
1836 host->overclock_50--;
1837 pr_warn("%s: reducing overclock due to errors\n",
1838 mmc_hostname(host->mmc));
1839 host->reset_clock = 1;
1840 mrq->cmd->error = -ETIMEDOUT;
1841 mrq->cmd->retries = 1;
1842 }
1843 }
1844
1845 host->mrq = NULL;
1846 host->cmd = NULL;
1847 host->data = NULL;
1848
1849 mmiowb();
1850
1851 host->dma_desc = NULL;
1852 terminate_chan = host->dma_chan;
1853 host->dma_chan = NULL;
1854
1855 spin_unlock_irqrestore(&host->lock, flags);
1856
1857 if (terminate_chan)
1858 {
1859 int err = dmaengine_terminate_all(terminate_chan);
1860 if (err)
1861 pr_err("%s: failed to terminate DMA (%d)\n",
1862 mmc_hostname(host->mmc), err);
1863 }
1864
1865 /* The SDHOST block doesn't report any errors for a disconnected
1866 interface. All cards and SDIO devices should report some supported
1867 voltage range, so a zero response to SEND_OP_COND, IO_SEND_OP_COND
1868 or APP_SEND_OP_COND can be treated as an error. */
1869 if (((mrq->cmd->opcode == MMC_SEND_OP_COND) ||
1870 (mrq->cmd->opcode == SD_IO_SEND_OP_COND) ||
1871 (mrq->cmd->opcode == SD_APP_OP_COND)) &&
1872 (mrq->cmd->error == 0) &&
1873 (mrq->cmd->resp[0] == 0)) {
1874 mrq->cmd->error = -ETIMEDOUT;
1875 if (host->debug)
1876 pr_info("%s: faking timeout due to zero OCR\n",
1877 mmc_hostname(host->mmc));
1878 }
1879
1880 mmc_request_done(host->mmc, mrq);
1881 log_event("TSK>", (u32)mrq, 0);
1882 }
1883
1884 int bcm2835_sdhost_add_host(struct bcm2835_host *host)
1885 {
1886 struct mmc_host *mmc;
1887 struct dma_slave_config cfg;
1888 char pio_limit_string[20];
1889 int ret;
1890
1891 mmc = host->mmc;
1892
1893 mmc->f_max = host->max_clk;
1894 mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;
1895
1896 mmc->max_busy_timeout = (~(unsigned int)0)/(mmc->f_max/1000);
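/*
 * E.g. with f_max = 250 MHz this gives 0xffffffff / 250000, roughly
 * 17179 ms; the result is in milliseconds because f_max is first divided
 * down to kHz.
 */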
1897
1898 pr_debug("f_max %d, f_min %d, max_busy_timeout %d\n",
1899 mmc->f_max, mmc->f_min, mmc->max_busy_timeout);
1900
1901 /* host controller capabilities */
1902 mmc->caps |=
1903 MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
1904 MMC_CAP_NEEDS_POLL | MMC_CAP_HW_RESET | MMC_CAP_ERASE |
1905 ((ALLOW_CMD23_READ|ALLOW_CMD23_WRITE) * MMC_CAP_CMD23);
1906
1907 spin_lock_init(&host->lock);
1908
1909 if (host->allow_dma) {
1910 if (IS_ERR_OR_NULL(host->dma_chan_rxtx)) {
1911 pr_err("%s: unable to initialise DMA channel. "
1912 "Falling back to PIO\n",
1913 mmc_hostname(mmc));
1914 host->use_dma = false;
1915 } else {
1916 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1917 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1918 cfg.slave_id = 13; /* DREQ channel */
1919
1920 /* Validate the slave configurations */
1921
1922 cfg.direction = DMA_MEM_TO_DEV;
1923 cfg.src_addr = 0;
1924 cfg.dst_addr = host->bus_addr + SDDATA;
1925
1926 ret = dmaengine_slave_config(host->dma_chan_rxtx, &cfg);
1927
1928 if (ret == 0) {
1929 host->dma_cfg_tx = cfg;
1930
1931 cfg.direction = DMA_DEV_TO_MEM;
1932 cfg.src_addr = host->bus_addr + SDDATA;
1933 cfg.dst_addr = 0;
1934
1935 ret = dmaengine_slave_config(host->dma_chan_rxtx, &cfg);
1936 }
1937
1938 if (ret == 0) {
1939 host->dma_cfg_rx = cfg;
1940
1941 host->use_dma = true;
1942 } else {
1943 pr_err("%s: unable to configure DMA channel. "
1944 "Falling back to PIO\n",
1945 mmc_hostname(mmc));
1946 dma_release_channel(host->dma_chan_rxtx);
1947 host->dma_chan_rxtx = NULL;
1948 host->use_dma = false;
1949 }
1950 }
1951 } else {
1952 host->use_dma = false;
1953 }
1954
1955 mmc->max_segs = 128;
1956 mmc->max_req_size = 524288;
1957 mmc->max_seg_size = mmc->max_req_size;
1958 mmc->max_blk_size = 512;
1959 mmc->max_blk_count = 65535;
1960
1961 /* report supported voltage ranges */
1962 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1963
1964 tasklet_init(&host->finish_tasklet,
1965 bcm2835_sdhost_tasklet_finish, (unsigned long)host);
1966
1967 INIT_WORK(&host->cmd_wait_wq, bcm2835_sdhost_cmd_wait_work);
1968
1969 setup_timer(&host->timer, bcm2835_sdhost_timeout,
1970 (unsigned long)host);
1971
1972 bcm2835_sdhost_init(host, 0);
1973
1974 ret = request_irq(host->irq, bcm2835_sdhost_irq, 0 /*IRQF_SHARED*/,
1975 mmc_hostname(mmc), host);
1976 if (ret) {
1977 pr_err("%s: failed to request IRQ %d: %d\n",
1978 mmc_hostname(mmc), host->irq, ret);
1979 goto untasklet;
1980 }
1981
1982 mmiowb();
1983 mmc_add_host(mmc);
1984
1985 pio_limit_string[0] = '\0';
1986 if (host->use_dma && (host->pio_limit > 0))
1987 sprintf(pio_limit_string, " (>%d)", host->pio_limit);
1988 pr_info("%s: %s loaded - DMA %s%s\n",
1989 mmc_hostname(mmc), DRIVER_NAME,
1990 host->use_dma ? "enabled" : "disabled",
1991 pio_limit_string);
1992
1993 return 0;
1994
1995 untasklet:
1996 tasklet_kill(&host->finish_tasklet);
1997
1998 return ret;
1999 }
2000
2001 static int bcm2835_sdhost_probe(struct platform_device *pdev)
2002 {
2003 struct device *dev = &pdev->dev;
2004 struct device_node *node = dev->of_node;
2005 struct clk *clk;
2006 struct resource *iomem;
2007 struct bcm2835_host *host;
2008 struct mmc_host *mmc;
2009 const __be32 *addr;
2010 u32 msg[3];
2011 int ret;
2012
2013 pr_debug("bcm2835_sdhost_probe\n");
2014 mmc = mmc_alloc_host(sizeof(*host), dev);
2015 if (!mmc)
2016 return -ENOMEM;
2017
2018 mmc->ops = &bcm2835_sdhost_ops;
2019 host = mmc_priv(mmc);
2020 host->mmc = mmc;
2021 host->pio_timeout = msecs_to_jiffies(500);
2022 host->pio_limit = 1;
2023 host->max_delay = 1; /* Warn if over 1ms */
2024 host->allow_dma = 1;
2025 spin_lock_init(&host->lock);
2026
2027 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2028 host->ioaddr = devm_ioremap_resource(dev, iomem);
2029 if (IS_ERR(host->ioaddr)) {
2030 ret = PTR_ERR(host->ioaddr);
2031 goto err;
2032 }
2033
2034 addr = of_get_address(node, 0, NULL, NULL);
if (!addr) {
	dev_err(dev, "could not get DMA-register address\n");
	ret = -ENODEV;
	goto err;
}
2039 host->bus_addr = be32_to_cpup(addr);
2040 pr_debug(" - ioaddr %lx, iomem->start %lx, bus_addr %lx\n",
2041 (unsigned long)host->ioaddr,
2042 (unsigned long)iomem->start,
2043 (unsigned long)host->bus_addr);
2044
2045 if (node) {
2046 /* Read any custom properties */
2047 of_property_read_u32(node,
2048 "brcm,delay-after-stop",
2049 &host->delay_after_stop);
2050 of_property_read_u32(node,
2051 "brcm,overclock-50",
2052 &host->user_overclock_50);
2053 of_property_read_u32(node,
2054 "brcm,pio-limit",
2055 &host->pio_limit);
2056 host->allow_dma =
2057 !of_property_read_bool(node, "brcm,force-pio");
2058 host->debug = of_property_read_bool(node, "brcm,debug");
2059 }
2060
2061 host->dma_chan = NULL;
2062 host->dma_desc = NULL;
2063
2064 /* Formally recognise the other way of disabling DMA */
2065 if (host->pio_limit == 0x7fffffff)
2066 host->allow_dma = false;
2067
2068 if (host->allow_dma) {
2069 if (node) {
2070 host->dma_chan_rxtx =
2071 dma_request_slave_channel(dev, "rx-tx");
2072 if (!host->dma_chan_rxtx)
2073 host->dma_chan_rxtx =
2074 dma_request_slave_channel(dev, "tx");
2075 if (!host->dma_chan_rxtx)
2076 host->dma_chan_rxtx =
2077 dma_request_slave_channel(dev, "rx");
2078 } else {
2079 dma_cap_mask_t mask;
2080
2081 dma_cap_zero(mask);
2082 /* we don't care about the channel, any would work */
2083 dma_cap_set(DMA_SLAVE, mask);
2084 host->dma_chan_rxtx =
2085 dma_request_channel(mask, NULL, NULL);
2086 }
2087 }
2088
2089 clk = devm_clk_get(dev, NULL);
2090 if (IS_ERR(clk)) {
2091 ret = PTR_ERR(clk);
2092 if (ret == -EPROBE_DEFER)
2093 dev_info(dev, "could not get clk, deferring probe\n");
2094 else
2095 dev_err(dev, "could not get clk\n");
2096 goto err;
2097 }
2098
2099 host->max_clk = clk_get_rate(clk);
2100
2101 host->irq = platform_get_irq(pdev, 0);
2102 if (host->irq <= 0) {
2103 dev_err(dev, "get IRQ failed\n");
2104 ret = -EINVAL;
2105 goto err;
2106 }
2107
2108 pr_debug(" - max_clk %lx, irq %d\n",
2109 (unsigned long)host->max_clk,
2110 (int)host->irq);
2111
2112 log_init(dev, iomem->start - host->bus_addr);
2113
2114 if (node)
2115 mmc_of_parse(mmc);
2116 else
2117 mmc->caps |= MMC_CAP_4_BIT_DATA;
2118
2119 msg[0] = 0;
2120 msg[1] = ~0;
2121 msg[2] = ~0;
2122
2123 rpi_firmware_property(rpi_firmware_get(NULL),
2124 RPI_FIRMWARE_SET_SDHOST_CLOCK,
2125 &msg, sizeof(msg));
2126
2127 host->firmware_sets_cdiv = (msg[1] != ~0);
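/*
 * The zero-clock message above doubles as a capability probe: firmware
 * that implements SET_SDHOST_CLOCK overwrites msg[1]/msg[2] with its
 * chosen frequencies, so msg[1] still being ~0 means the clock divider
 * must be managed locally and set_clock() takes the non-firmware branch.
 */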
2128
2129 ret = bcm2835_sdhost_add_host(host);
2130 if (ret)
2131 goto err;
2132
2133 platform_set_drvdata(pdev, host);
2134
2135 pr_debug("bcm2835_sdhost_probe -> OK\n");
2136
2137 return 0;
2138
2139 err:
2140 pr_debug("bcm2835_sdhost_probe -> err %d\n", ret);
2141 mmc_free_host(mmc);
2142
2143 return ret;
2144 }
2145
2146 static int bcm2835_sdhost_remove(struct platform_device *pdev)
2147 {
2148 struct bcm2835_host *host = platform_get_drvdata(pdev);
2149
2150 pr_debug("bcm2835_sdhost_remove\n");
2151
2152 mmc_remove_host(host->mmc);
2153
2154 bcm2835_sdhost_set_power(host, false);
2155
2156 free_irq(host->irq, host);
2157
2158 del_timer_sync(&host->timer);
2159
2160 tasklet_kill(&host->finish_tasklet);
2161
2162 mmc_free_host(host->mmc);
2163 platform_set_drvdata(pdev, NULL);
2164
2165 pr_debug("bcm2835_sdhost_remove - OK\n");
2166 return 0;
2167 }
2168
2169 static const struct of_device_id bcm2835_sdhost_match[] = {
2170 { .compatible = "brcm,bcm2835-sdhost" },
2171 { }
2172 };
2173 MODULE_DEVICE_TABLE(of, bcm2835_sdhost_match);
2174
2175 static struct platform_driver bcm2835_sdhost_driver = {
2176 .probe = bcm2835_sdhost_probe,
2177 .remove = bcm2835_sdhost_remove,
2178 .driver = {
2179 .name = DRIVER_NAME,
2180 .owner = THIS_MODULE,
2181 .of_match_table = bcm2835_sdhost_match,
2182 },
2183 };
2184 module_platform_driver(bcm2835_sdhost_driver);
2185
2186 MODULE_ALIAS("platform:sdhost-bcm2835");
2187 MODULE_DESCRIPTION("BCM2835 SDHost driver");
2188 MODULE_LICENSE("GPL v2");
2189 MODULE_AUTHOR("Phil Elwell");