]>
Commit | Line | Data |
---|---|---|
7d2be074 HS |
1 | /* |
2 | * Atmel MultiMedia Card Interface driver | |
3 | * | |
4 | * Copyright (C) 2004-2008 Atmel Corporation | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | #include <linux/blkdev.h> | |
11 | #include <linux/clk.h> | |
deec9ae3 | 12 | #include <linux/debugfs.h> |
7d2be074 | 13 | #include <linux/device.h> |
65e8b083 HS |
14 | #include <linux/dmaengine.h> |
15 | #include <linux/dma-mapping.h> | |
fbfca4b8 | 16 | #include <linux/err.h> |
3c26e170 | 17 | #include <linux/gpio.h> |
7d2be074 HS |
18 | #include <linux/init.h> |
19 | #include <linux/interrupt.h> | |
20 | #include <linux/ioport.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/platform_device.h> | |
23 | #include <linux/scatterlist.h> | |
deec9ae3 HS |
24 | #include <linux/seq_file.h> |
25 | #include <linux/stat.h> | |
7d2be074 HS |
26 | |
27 | #include <linux/mmc/host.h> | |
28 | ||
29 | #include <asm/atmel-mci.h> | |
30 | #include <asm/io.h> | |
31 | #include <asm/unaligned.h> | |
32 | ||
3663b736 | 33 | #include <mach/board.h> |
7d2be074 HS |
34 | |
35 | #include "atmel-mci-regs.h" | |
36 | ||
37 | #define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) | |
65e8b083 | 38 | #define ATMCI_DMA_THRESHOLD 16 |
7d2be074 HS |
39 | |
40 | enum { | |
41 | EVENT_CMD_COMPLETE = 0, | |
7d2be074 | 42 | EVENT_XFER_COMPLETE, |
c06ad258 HS |
43 | EVENT_DATA_COMPLETE, |
44 | EVENT_DATA_ERROR, | |
45 | }; | |
46 | ||
47 | enum atmel_mci_state { | |
965ebf33 HS |
48 | STATE_IDLE = 0, |
49 | STATE_SENDING_CMD, | |
c06ad258 HS |
50 | STATE_SENDING_DATA, |
51 | STATE_DATA_BUSY, | |
52 | STATE_SENDING_STOP, | |
53 | STATE_DATA_ERROR, | |
7d2be074 HS |
54 | }; |
55 | ||
/*
 * DMA engine state for data transfers. Compiles to an empty struct
 * when the driver is built without DMA support.
 */
struct atmel_mci_dma {
#ifdef CONFIG_MMC_ATMELMCI_DMA
	struct dma_client		client;	/* registration with dmaengine */
	struct dma_chan			*chan;	/* channel allocated for this host */
	struct dma_async_tx_descriptor	*data_desc; /* in-flight descriptor */
#endif
};
63 | ||
/**
 * struct atmel_mci - MMC controller state shared between all slots
 * @lock: Spinlock protecting the queue and associated data.
 * @regs: Pointer to MMIO registers.
 * @sg: Scatterlist entry currently being processed by PIO code, if any.
 * @pio_offset: Offset into the current scatterlist entry.
 * @cur_slot: The slot which is currently using the controller.
 * @mrq: The request currently being processed on @cur_slot,
 *	or NULL if the controller is idle.
 * @cmd: The command currently being sent to the card, or NULL.
 * @data: The data currently being transferred, or NULL if no data
 *	transfer is in progress.
 * @dma: DMA client state.
 * @data_chan: DMA channel being used for the current data transfer.
 * @cmd_status: Snapshot of SR taken upon completion of the current
 *	command. Only valid when EVENT_CMD_COMPLETE is pending.
 * @data_status: Snapshot of SR taken upon completion of the current
 *	data transfer. Only valid when EVENT_DATA_COMPLETE or
 *	EVENT_DATA_ERROR is pending.
 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
 *	to be sent.
 * @tasklet: Tasklet running the request state machine.
 * @pending_events: Bitmask of events flagged by the interrupt handler
 *	to be processed by the tasklet.
 * @completed_events: Bitmask of events which the state machine has
 *	processed.
 * @state: Tasklet state.
 * @queue: List of slots waiting for access to the controller.
 * @need_clock_update: Update the clock rate before the next request.
 * @need_reset: Reset controller before next request.
 * @mode_reg: Value of the MR register.
 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
 *	rate and timeout calculations.
 * @mapbase: Physical address of the MMIO registers.
 * @mck: The peripheral bus clock hooked up to the MMC controller.
 * @pdev: Platform device associated with the MMC controller.
 * @slot: Slots sharing this MMC controller.
 *
 * Locking
 * =======
 *
 * @lock is a softirq-safe spinlock protecting @queue as well as
 * @cur_slot, @mrq and @state. These must always be updated
 * at the same time while holding @lock.
 *
 * @lock also protects mode_reg and need_clock_update since these are
 * used to synchronize mode register updates with the queue
 * processing.
 *
 * The @mrq field of struct atmel_mci_slot is also protected by @lock,
 * and must always be written at the same time as the slot is added to
 * @queue.
 *
 * @pending_events and @completed_events are accessed using atomic bit
 * operations, so they don't need any locking.
 *
 * None of the fields touched by the interrupt handler need any
 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
 * interrupts must be disabled and @data_status updated with a
 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
 * CMDRDY interrupt must be disabled and @cmd_status updated with a
 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
 * bytes_xfered field of @data must be written. This is ensured by
 * using barriers.
 */
struct atmel_mci {
	spinlock_t		lock;
	void __iomem		*regs;

	struct scatterlist	*sg;
	unsigned int		pio_offset;

	struct atmel_mci_slot	*cur_slot;
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;

	struct atmel_mci_dma	dma;
	struct dma_chan		*data_chan;

	u32			cmd_status;
	u32			data_status;
	u32			stop_cmdr;

	struct tasklet_struct	tasklet;
	unsigned long		pending_events;
	unsigned long		completed_events;
	enum atmel_mci_state	state;
	struct list_head	queue;

	bool			need_clock_update;
	bool			need_reset;
	u32			mode_reg;
	unsigned long		bus_hz;
	unsigned long		mapbase;
	struct clk		*mck;
	struct platform_device	*pdev;

	struct atmel_mci_slot	*slot[ATMEL_MCI_MAX_NR_SLOTS];
};
165 | ||
/**
 * struct atmel_mci_slot - MMC slot state
 * @mmc: The mmc_host representing this slot.
 * @host: The MMC controller this slot is using.
 * @sdc_reg: Value of SDCR to be written before using this slot.
 * @mrq: mmc_request currently being processed or waiting to be
 *	processed, or NULL when the slot is idle.
 * @queue_node: List node for placing this node in the @queue list of
 *	&struct atmel_mci.
 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
 * @flags: Random state bits associated with the slot.
 * @detect_pin: GPIO pin used for card detection, or negative if not
 *	available.
 * @wp_pin: GPIO pin used for card write protect sending, or negative
 *	if not available.
 * @detect_timer: Timer used for debouncing @detect_pin interrupts.
 */
struct atmel_mci_slot {
	struct mmc_host		*mmc;
	struct atmel_mci	*host;

	u32			sdc_reg;

	struct mmc_request	*mrq;
	struct list_head	queue_node;

	unsigned int		clock;
	unsigned long		flags;
/* Bit numbers for @flags */
#define ATMCI_CARD_PRESENT	0
#define ATMCI_CARD_NEED_INIT	1
#define ATMCI_SHUTDOWN		2

	int			detect_pin;
	int			wp_pin;

	struct timer_list	detect_timer;
};
203 | ||
/*
 * Helpers for the event bitmasks in struct atmel_mci. @event is one of
 * the EVENT_* bit numbers; atomic bitops are used, so no extra locking
 * is needed.
 *
 * The @host argument is parenthesized so the macros expand correctly
 * even when passed a non-trivial expression.
 */
#define atmci_test_and_clear_pending(host, event)		\
	test_and_clear_bit(event, &(host)->pending_events)
#define atmci_set_completed(host, event)			\
	set_bit(event, &(host)->completed_events)
#define atmci_set_pending(host, event)				\
	set_bit(event, &(host)->pending_events)
7d2be074 | 210 | |
deec9ae3 HS |
211 | /* |
212 | * The debugfs stuff below is mostly optimized away when | |
213 | * CONFIG_DEBUG_FS is not set. | |
214 | */ | |
215 | static int atmci_req_show(struct seq_file *s, void *v) | |
216 | { | |
965ebf33 HS |
217 | struct atmel_mci_slot *slot = s->private; |
218 | struct mmc_request *mrq; | |
deec9ae3 HS |
219 | struct mmc_command *cmd; |
220 | struct mmc_command *stop; | |
221 | struct mmc_data *data; | |
222 | ||
223 | /* Make sure we get a consistent snapshot */ | |
965ebf33 HS |
224 | spin_lock_bh(&slot->host->lock); |
225 | mrq = slot->mrq; | |
deec9ae3 HS |
226 | |
227 | if (mrq) { | |
228 | cmd = mrq->cmd; | |
229 | data = mrq->data; | |
230 | stop = mrq->stop; | |
231 | ||
232 | if (cmd) | |
233 | seq_printf(s, | |
234 | "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", | |
235 | cmd->opcode, cmd->arg, cmd->flags, | |
236 | cmd->resp[0], cmd->resp[1], cmd->resp[2], | |
237 | cmd->resp[2], cmd->error); | |
238 | if (data) | |
239 | seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", | |
240 | data->bytes_xfered, data->blocks, | |
241 | data->blksz, data->flags, data->error); | |
242 | if (stop) | |
243 | seq_printf(s, | |
244 | "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", | |
245 | stop->opcode, stop->arg, stop->flags, | |
246 | stop->resp[0], stop->resp[1], stop->resp[2], | |
247 | stop->resp[2], stop->error); | |
248 | } | |
249 | ||
965ebf33 | 250 | spin_unlock_bh(&slot->host->lock); |
deec9ae3 HS |
251 | |
252 | return 0; | |
253 | } | |
254 | ||
/* debugfs open hook: bind atmci_req_show() to the slot stored in i_private. */
static int atmci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, atmci_req_show, inode->i_private);
}
259 | ||
/* File operations for the per-slot debugfs "req" file. */
static const struct file_operations atmci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= atmci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
267 | ||
268 | static void atmci_show_status_reg(struct seq_file *s, | |
269 | const char *regname, u32 value) | |
270 | { | |
271 | static const char *sr_bit[] = { | |
272 | [0] = "CMDRDY", | |
273 | [1] = "RXRDY", | |
274 | [2] = "TXRDY", | |
275 | [3] = "BLKE", | |
276 | [4] = "DTIP", | |
277 | [5] = "NOTBUSY", | |
278 | [8] = "SDIOIRQA", | |
279 | [9] = "SDIOIRQB", | |
280 | [16] = "RINDE", | |
281 | [17] = "RDIRE", | |
282 | [18] = "RCRCE", | |
283 | [19] = "RENDE", | |
284 | [20] = "RTOE", | |
285 | [21] = "DCRCE", | |
286 | [22] = "DTOE", | |
287 | [30] = "OVRE", | |
288 | [31] = "UNRE", | |
289 | }; | |
290 | unsigned int i; | |
291 | ||
292 | seq_printf(s, "%s:\t0x%08x", regname, value); | |
293 | for (i = 0; i < ARRAY_SIZE(sr_bit); i++) { | |
294 | if (value & (1 << i)) { | |
295 | if (sr_bit[i]) | |
296 | seq_printf(s, " %s", sr_bit[i]); | |
297 | else | |
298 | seq_puts(s, " UNKNOWN"); | |
299 | } | |
300 | } | |
301 | seq_putc(s, '\n'); | |
302 | } | |
303 | ||
/*
 * debugfs "regs" file: dump a snapshot of the controller registers.
 * A bounce buffer is used so the MMIO copy happens quickly under the
 * lock with the peripheral clock enabled.
 */
static int atmci_regs_show(struct seq_file *s, void *v)
{
	struct atmel_mci	*host = s->private;
	u32			*buf;

	buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * Grab a more or less consistent snapshot. Note that we're
	 * not disabling interrupts, so IMR and SR may not be
	 * consistent.
	 */
	spin_lock_bh(&host->lock);
	clk_enable(host->mck);
	memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
	clk_disable(host->mck);
	spin_unlock_bh(&host->lock);

	/* Decode MR: proof bits plus the clock divider in the low byte */
	seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
			buf[MCI_MR / 4],
			buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "",
			buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "",
			buf[MCI_MR / 4] & 0xff);
	seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]);
	seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]);
	seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]);
	seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
			buf[MCI_BLKR / 4],
			buf[MCI_BLKR / 4] & 0xffff,
			(buf[MCI_BLKR / 4] >> 16) & 0xffff);

	/* Don't read RSPR and RDR; it will consume the data there */

	atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
	atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);

	kfree(buf);

	return 0;
}
346 | ||
/* debugfs open hook: bind atmci_regs_show() to the host stored in i_private. */
static int atmci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, atmci_regs_show, inode->i_private);
}
351 | ||
/* File operations for the per-host debugfs "regs" file. */
static const struct file_operations atmci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= atmci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
359 | ||
/*
 * Create the debugfs files for @slot under the mmc core's per-host
 * debugfs directory: raw register dump, current request, tasklet
 * state and the two event bitmasks.
 *
 * NOTE(review): on a partial failure only an error is logged; files
 * created before the failing one are not removed here — confirm that
 * the mmc core's removal of debugfs_root cleans them up.
 */
static void atmci_init_debugfs(struct atmel_mci_slot *slot)
{
	struct mmc_host		*mmc = slot->mmc;
	struct atmel_mci	*host = slot->host;
	struct dentry		*root;
	struct dentry		*node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
			&atmci_regs_fops);
	/* An ERR_PTR means debugfs is disabled: not an error, just stop */
	if (IS_ERR(node))
		return;
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				     (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				     (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
7d2be074 | 401 | |
7d2be074 HS |
402 | static inline unsigned int ns_to_clocks(struct atmel_mci *host, |
403 | unsigned int ns) | |
404 | { | |
405 | return (ns * (host->bus_hz / 1000000) + 999) / 1000; | |
406 | } | |
407 | ||
/*
 * Program the data timeout register (DTOR) for @data. The hardware
 * expresses the timeout as DTOCYC (0..15 cycles) scaled by one of the
 * eight DTOMUL multipliers; pick the smallest multiplier that can
 * represent the requested timeout, saturating at the maximum.
 */
static void atmci_set_timeout(struct atmel_mci *host,
		struct atmel_mci_slot *slot, struct mmc_data *data)
{
	/* Shift corresponding to each DTOMUL multiplier value */
	static unsigned	dtomul_to_shift[] = {
		0, 4, 7, 8, 10, 12, 16, 20
	};
	unsigned	timeout;
	unsigned	dtocyc;
	unsigned	dtomul;

	timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks;

	for (dtomul = 0; dtomul < 8; dtomul++) {
		unsigned shift = dtomul_to_shift[dtomul];
		/* Round the scaled cycle count up */
		dtocyc = (timeout + (1 << shift) - 1) >> shift;
		if (dtocyc < 15)
			break;
	}

	/* Timeout too large to represent: clamp to the hardware maximum */
	if (dtomul >= 8) {
		dtomul = 7;
		dtocyc = 15;
	}

	dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",
			dtocyc << dtomul_to_shift[dtomul]);
	mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc)));
}
436 | ||
437 | /* | |
438 | * Return mask with command flags to be enabled for this command. | |
439 | */ | |
440 | static u32 atmci_prepare_command(struct mmc_host *mmc, | |
441 | struct mmc_command *cmd) | |
442 | { | |
443 | struct mmc_data *data; | |
444 | u32 cmdr; | |
445 | ||
446 | cmd->error = -EINPROGRESS; | |
447 | ||
448 | cmdr = MCI_CMDR_CMDNB(cmd->opcode); | |
449 | ||
450 | if (cmd->flags & MMC_RSP_PRESENT) { | |
451 | if (cmd->flags & MMC_RSP_136) | |
452 | cmdr |= MCI_CMDR_RSPTYP_136BIT; | |
453 | else | |
454 | cmdr |= MCI_CMDR_RSPTYP_48BIT; | |
455 | } | |
456 | ||
457 | /* | |
458 | * This should really be MAXLAT_5 for CMD2 and ACMD41, but | |
459 | * it's too difficult to determine whether this is an ACMD or | |
460 | * not. Better make it 64. | |
461 | */ | |
462 | cmdr |= MCI_CMDR_MAXLAT_64CYC; | |
463 | ||
464 | if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN) | |
465 | cmdr |= MCI_CMDR_OPDCMD; | |
466 | ||
467 | data = cmd->data; | |
468 | if (data) { | |
469 | cmdr |= MCI_CMDR_START_XFER; | |
470 | if (data->flags & MMC_DATA_STREAM) | |
471 | cmdr |= MCI_CMDR_STREAM; | |
472 | else if (data->blocks > 1) | |
473 | cmdr |= MCI_CMDR_MULTI_BLOCK; | |
474 | else | |
475 | cmdr |= MCI_CMDR_BLOCK; | |
476 | ||
477 | if (data->flags & MMC_DATA_READ) | |
478 | cmdr |= MCI_CMDR_TRDIR_READ; | |
479 | } | |
480 | ||
481 | return cmdr; | |
482 | } | |
483 | ||
/*
 * Send @cmd to the card: record it as the in-flight command, then
 * write ARGR and CMDR (writing CMDR starts the command).
 */
static void atmci_start_command(struct atmel_mci *host,
		struct mmc_command *cmd, u32 cmd_flags)
{
	/* Only one command may be in flight at a time */
	WARN_ON(host->cmd);
	host->cmd = cmd;

	dev_vdbg(&host->pdev->dev,
		"start command: ARGR=0x%08x CMDR=0x%08x\n",
		cmd->arg, cmd_flags);

	mci_writel(host, ARGR, cmd->arg);
	mci_writel(host, CMDR, cmd_flags);
}
497 | ||
/*
 * Issue the prepared stop command (host->stop_cmdr, set up by
 * atmci_start_request()) and enable the CMDRDY interrupt so its
 * completion is reported.
 */
static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
	atmci_start_command(host, data->stop, host->stop_cmdr);
	mci_writel(host, IER, MCI_CMDRDY);
}
503 | ||
65e8b083 HS |
504 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
505 | static void atmci_dma_cleanup(struct atmel_mci *host) | |
506 | { | |
507 | struct mmc_data *data = host->data; | |
508 | ||
509 | dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, | |
510 | ((data->flags & MMC_DATA_WRITE) | |
511 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); | |
512 | } | |
513 | ||
/*
 * Abort the current data transfer. If a DMA channel is active,
 * terminate it and unmap the buffers; otherwise the transfer was PIO
 * and was already stopped by the interrupt handler, so just flag the
 * transfer as complete and wait for NOTBUSY.
 */
static void atmci_stop_dma(struct atmel_mci *host)
{
	struct dma_chan *chan = host->data_chan;

	if (chan) {
		chan->device->device_terminate_all(chan);
		atmci_dma_cleanup(host);
	} else {
		/* Data transfer was stopped by the interrupt handler */
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		mci_writel(host, IER, MCI_NOTBUSY);
	}
}
527 | ||
/* This function is called by the DMA driver from tasklet context. */
static void atmci_dma_complete(void *arg)
{
	struct atmel_mci	*host = arg;
	struct mmc_data		*data = host->data;

	dev_vdbg(&host->pdev->dev, "DMA complete\n");

	atmci_dma_cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		tasklet_schedule(&host->tasklet);

		/*
		 * Regardless of what the documentation says, we have
		 * to wait for NOTBUSY even after block read
		 * operations.
		 *
		 * When the DMA transfer is complete, the controller
		 * may still be reading the CRC from the card, i.e.
		 * the data transfer is still in progress and we
		 * haven't seen all the potential error bits yet.
		 *
		 * The interrupt handler will schedule a different
		 * tasklet to finish things up when the data transfer
		 * is completely done.
		 *
		 * We may not complete the mmc request here anyway
		 * because the mmc layer may call back and cause us to
		 * violate the "don't submit new operations from the
		 * completion callback" rule of the dma engine
		 * framework.
		 */
		mci_writel(host, IER, MCI_NOTBUSY);
	}
}
569 | ||
/*
 * Try to set up a DMA transfer for @data. Returns 0 on success, or a
 * negative error code if the transfer is unsuitable for DMA (caller
 * then falls back to PIO).
 */
static int
atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
	struct dma_chan			*chan;
	struct dma_async_tx_descriptor	*desc;
	struct scatterlist		*sg;
	unsigned int			i;
	enum dma_data_direction		direction;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD)
		return -EINVAL;
	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	/* If we don't have a channel, we can't do DMA */
	chan = host->dma.chan;
	if (chan) {
		dma_chan_get(chan);
		host->data_chan = chan;
	}

	if (!chan)
		return -ENODEV;

	if (data->flags & MMC_DATA_READ)
		direction = DMA_FROM_DEVICE;
	else
		direction = DMA_TO_DEVICE;

	desc = chan->device->device_prep_slave_sg(chan,
			data->sg, data->sg_len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * NOTE(review): on this failure path the channel reference taken
	 * above is kept and host->data_chan stays set — confirm whether
	 * the stop/cleanup path balances this.
	 */
	if (!desc)
		return -ENOMEM;

	host->dma.data_desc = desc;
	desc->callback = atmci_dma_complete;
	desc->callback_param = host;
	desc->tx_submit(desc);

	/* Go! */
	chan->device->device_issue_pending(chan);

	return 0;
}
625 | ||
#else /* CONFIG_MMC_ATMELMCI_DMA */

/* DMA not compiled in: always report "no DMA" so PIO is used. */
static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
	return -ENOSYS;
}

/* PIO-only variant: the transfer was already stopped by the IRQ handler. */
static void atmci_stop_dma(struct atmel_mci *host)
{
	/* Data transfer was stopped by the interrupt handler */
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
	mci_writel(host, IER, MCI_NOTBUSY);
}

#endif /* CONFIG_MMC_ATMELMCI_DMA */
641 | ||
/*
 * Returns a mask of interrupt flags to be enabled after the whole
 * request has been prepared.
 */
static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
{
	u32 iflags;

	data->error = -EINPROGRESS;

	/* Only one data transfer may be in flight at a time */
	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	iflags = ATMCI_DATA_ERROR_FLAGS;
	/* Try DMA first; fall back to PIO if the transfer is unsuitable */
	if (atmci_submit_data_dma(host, data)) {
		host->data_chan = NULL;

		/*
		 * Errata: MMC data write operation with less than 12
		 * bytes is impossible.
		 *
		 * Errata: MCI Transmit Data Register (TDR) FIFO
		 * corruption when length is not multiple of 4.
		 */
		if (data->blocks * data->blksz < 12
				|| (data->blocks * data->blksz) & 3)
			host->need_reset = true;

		/* Set up PIO state and enable the matching FIFO interrupt */
		host->sg = data->sg;
		host->pio_offset = 0;
		if (data->flags & MMC_DATA_READ)
			iflags |= MCI_RXRDY;
		else
			iflags |= MCI_TXRDY;
	}

	return iflags;
}
681 | ||
/*
 * Start processing @slot's pending request on the controller.
 * Called with host->lock held and the controller idle; resets
 * per-request state, programs the hardware and kicks off the command
 * (and data transfer, if any). Interrupts are enabled only at the very
 * end, once the whole request (including the stop command) is prepared.
 */
static void atmci_start_request(struct atmel_mci *host,
		struct atmel_mci_slot *slot)
{
	struct mmc_request	*mrq;
	struct mmc_command	*cmd;
	struct mmc_data		*data;
	u32			iflags;
	u32			cmdflags;

	mrq = slot->mrq;
	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	/* Recover from errata conditions flagged by the previous request */
	if (host->need_reset) {
		mci_writel(host, CR, MCI_CR_SWRST);
		mci_writel(host, CR, MCI_CR_MCIEN);
		mci_writel(host, MR, host->mode_reg);
		host->need_reset = false;
	}
	/* Select this slot's configuration (bus width, slot number) */
	mci_writel(host, SDCR, slot->sdc_reg);

	/* All interrupts should be masked between requests */
	iflags = mci_readl(host, IMR);
	if (iflags)
		dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
				iflags);

	if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
		/* Send init sequence (74 clock cycles) */
		mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT);
		while (!(mci_readl(host, SR) & MCI_CMDRDY))
			cpu_relax();
	}
	data = mrq->data;
	if (data) {
		atmci_set_timeout(host, slot, data);

		/* Must set block count/size before sending command */
		mci_writel(host, BLKR, MCI_BCNT(data->blocks)
				| MCI_BLKLEN(data->blksz));
		dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
			MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
	}

	iflags = MCI_CMDRDY;
	cmd = mrq->cmd;
	cmdflags = atmci_prepare_command(slot->mmc, cmd);
	atmci_start_command(host, cmd, cmdflags);

	if (data)
		iflags |= atmci_submit_data(host, data);

	/* Pre-compute CMDR for the stop command so it can be sent quickly */
	if (mrq->stop) {
		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
		host->stop_cmdr |= MCI_CMDR_STOP_XFER;
		if (!(data->flags & MMC_DATA_WRITE))
			host->stop_cmdr |= MCI_CMDR_TRDIR_READ;
		if (data->flags & MMC_DATA_STREAM)
			host->stop_cmdr |= MCI_CMDR_STREAM;
		else
			host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK;
	}

	/*
	 * We could have enabled interrupts earlier, but I suspect
	 * that would open up a nice can of interesting race
	 * conditions (e.g. command and data complete, but stop not
	 * prepared yet.)
	 */
	mci_writel(host, IER, iflags);
}
7d2be074 | 756 | |
/*
 * Hand @mrq to the controller: start it immediately if the controller
 * is idle, otherwise park the slot on the host queue for the tasklet
 * to pick up later. Takes host->lock to serialize with the state
 * machine.
 */
static void atmci_queue_request(struct atmel_mci *host,
		struct atmel_mci_slot *slot, struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
			host->state);

	spin_lock_bh(&host->lock);
	slot->mrq = mrq;
	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		atmci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
	spin_unlock_bh(&host->lock);
}
7d2be074 | 773 | |
965ebf33 HS |
774 | static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) |
775 | { | |
776 | struct atmel_mci_slot *slot = mmc_priv(mmc); | |
777 | struct atmel_mci *host = slot->host; | |
778 | struct mmc_data *data; | |
779 | ||
780 | WARN_ON(slot->mrq); | |
781 | ||
782 | /* | |
783 | * We may "know" the card is gone even though there's still an | |
784 | * electrical connection. If so, we really need to communicate | |
785 | * this to the MMC core since there won't be any more | |
786 | * interrupts as the card is completely removed. Otherwise, | |
787 | * the MMC core might believe the card is still there even | |
788 | * though the card was just removed very slowly. | |
789 | */ | |
790 | if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) { | |
791 | mrq->cmd->error = -ENOMEDIUM; | |
792 | mmc_request_done(mmc, mrq); | |
793 | return; | |
794 | } | |
795 | ||
796 | /* We don't support multiple blocks of weird lengths. */ | |
797 | data = mrq->data; | |
798 | if (data && data->blocks > 1 && data->blksz & 3) { | |
799 | mrq->cmd->error = -EINVAL; | |
800 | mmc_request_done(mmc, mrq); | |
801 | } | |
802 | ||
803 | atmci_queue_request(host, slot, mrq); | |
7d2be074 HS |
804 | } |
805 | ||
806 | static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |
807 | { | |
965ebf33 HS |
808 | struct atmel_mci_slot *slot = mmc_priv(mmc); |
809 | struct atmel_mci *host = slot->host; | |
810 | unsigned int i; | |
7d2be074 | 811 | |
965ebf33 | 812 | slot->sdc_reg &= ~MCI_SDCBUS_MASK; |
945533b5 HS |
813 | switch (ios->bus_width) { |
814 | case MMC_BUS_WIDTH_1: | |
965ebf33 | 815 | slot->sdc_reg |= MCI_SDCBUS_1BIT; |
945533b5 HS |
816 | break; |
817 | case MMC_BUS_WIDTH_4: | |
965ebf33 | 818 | slot->sdc_reg = MCI_SDCBUS_4BIT; |
945533b5 HS |
819 | break; |
820 | } | |
821 | ||
7d2be074 | 822 | if (ios->clock) { |
965ebf33 | 823 | unsigned int clock_min = ~0U; |
7d2be074 HS |
824 | u32 clkdiv; |
825 | ||
965ebf33 HS |
826 | spin_lock_bh(&host->lock); |
827 | if (!host->mode_reg) { | |
945533b5 | 828 | clk_enable(host->mck); |
965ebf33 HS |
829 | mci_writel(host, CR, MCI_CR_SWRST); |
830 | mci_writel(host, CR, MCI_CR_MCIEN); | |
831 | } | |
945533b5 | 832 | |
965ebf33 HS |
833 | /* |
834 | * Use mirror of ios->clock to prevent race with mmc | |
835 | * core ios update when finding the minimum. | |
836 | */ | |
837 | slot->clock = ios->clock; | |
838 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { | |
839 | if (host->slot[i] && host->slot[i]->clock | |
840 | && host->slot[i]->clock < clock_min) | |
841 | clock_min = host->slot[i]->clock; | |
842 | } | |
843 | ||
844 | /* Calculate clock divider */ | |
845 | clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; | |
7d2be074 HS |
846 | if (clkdiv > 255) { |
847 | dev_warn(&mmc->class_dev, | |
848 | "clock %u too slow; using %lu\n", | |
965ebf33 | 849 | clock_min, host->bus_hz / (2 * 256)); |
7d2be074 HS |
850 | clkdiv = 255; |
851 | } | |
852 | ||
965ebf33 HS |
853 | /* |
854 | * WRPROOF and RDPROOF prevent overruns/underruns by | |
855 | * stopping the clock when the FIFO is full/empty. | |
856 | * This state is not expected to last for long. | |
857 | */ | |
7d2be074 HS |
858 | host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF |
859 | | MCI_MR_RDPROOF; | |
7d2be074 | 860 | |
965ebf33 HS |
861 | if (list_empty(&host->queue)) |
862 | mci_writel(host, MR, host->mode_reg); | |
863 | else | |
864 | host->need_clock_update = true; | |
865 | ||
866 | spin_unlock_bh(&host->lock); | |
945533b5 | 867 | } else { |
965ebf33 HS |
868 | bool any_slot_active = false; |
869 | ||
870 | spin_lock_bh(&host->lock); | |
871 | slot->clock = 0; | |
872 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { | |
873 | if (host->slot[i] && host->slot[i]->clock) { | |
874 | any_slot_active = true; | |
875 | break; | |
876 | } | |
945533b5 | 877 | } |
965ebf33 HS |
878 | if (!any_slot_active) { |
879 | mci_writel(host, CR, MCI_CR_MCIDIS); | |
880 | if (host->mode_reg) { | |
881 | mci_readl(host, MR); | |
882 | clk_disable(host->mck); | |
883 | } | |
884 | host->mode_reg = 0; | |
885 | } | |
886 | spin_unlock_bh(&host->lock); | |
7d2be074 HS |
887 | } |
888 | ||
889 | switch (ios->power_mode) { | |
965ebf33 HS |
890 | case MMC_POWER_UP: |
891 | set_bit(ATMCI_CARD_NEED_INIT, &slot->flags); | |
892 | break; | |
7d2be074 HS |
893 | default: |
894 | /* | |
895 | * TODO: None of the currently available AVR32-based | |
896 | * boards allow MMC power to be turned off. Implement | |
897 | * power control when this can be tested properly. | |
965ebf33 HS |
898 | * |
899 | * We also need to hook this into the clock management | |
900 | * somehow so that newly inserted cards aren't | |
901 | * subjected to a fast clock before we have a chance | |
902 | * to figure out what the maximum rate is. Currently, | |
903 | * there's no way to avoid this, and there never will | |
904 | * be for boards that don't support power control. | |
7d2be074 HS |
905 | */ |
906 | break; | |
907 | } | |
908 | } | |
909 | ||
910 | static int atmci_get_ro(struct mmc_host *mmc) | |
911 | { | |
965ebf33 HS |
912 | int read_only = -ENOSYS; |
913 | struct atmel_mci_slot *slot = mmc_priv(mmc); | |
7d2be074 | 914 | |
965ebf33 HS |
915 | if (gpio_is_valid(slot->wp_pin)) { |
916 | read_only = gpio_get_value(slot->wp_pin); | |
7d2be074 HS |
917 | dev_dbg(&mmc->class_dev, "card is %s\n", |
918 | read_only ? "read-only" : "read-write"); | |
7d2be074 HS |
919 | } |
920 | ||
921 | return read_only; | |
922 | } | |
923 | ||
965ebf33 HS |
924 | static int atmci_get_cd(struct mmc_host *mmc) |
925 | { | |
926 | int present = -ENOSYS; | |
927 | struct atmel_mci_slot *slot = mmc_priv(mmc); | |
928 | ||
929 | if (gpio_is_valid(slot->detect_pin)) { | |
930 | present = !gpio_get_value(slot->detect_pin); | |
931 | dev_dbg(&mmc->class_dev, "card is %spresent\n", | |
932 | present ? "" : "not "); | |
933 | } | |
934 | ||
935 | return present; | |
936 | } | |
937 | ||
/* Host operations handed to the MMC core for each slot. */
static const struct mmc_host_ops atmci_ops = {
	.request	= atmci_request,
	.set_ios	= atmci_set_ios,
	.get_ro		= atmci_get_ro,
	.get_cd		= atmci_get_cd,
};
944 | ||
965ebf33 HS |
945 | /* Called with host->lock held */ |
946 | static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) | |
947 | __releases(&host->lock) | |
948 | __acquires(&host->lock) | |
949 | { | |
950 | struct atmel_mci_slot *slot = NULL; | |
951 | struct mmc_host *prev_mmc = host->cur_slot->mmc; | |
952 | ||
953 | WARN_ON(host->cmd || host->data); | |
954 | ||
955 | /* | |
956 | * Update the MMC clock rate if necessary. This may be | |
957 | * necessary if set_ios() is called when a different slot is | |
958 | * busy transfering data. | |
959 | */ | |
960 | if (host->need_clock_update) | |
961 | mci_writel(host, MR, host->mode_reg); | |
962 | ||
963 | host->cur_slot->mrq = NULL; | |
964 | host->mrq = NULL; | |
965 | if (!list_empty(&host->queue)) { | |
966 | slot = list_entry(host->queue.next, | |
967 | struct atmel_mci_slot, queue_node); | |
968 | list_del(&slot->queue_node); | |
969 | dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", | |
970 | mmc_hostname(slot->mmc)); | |
971 | host->state = STATE_SENDING_CMD; | |
972 | atmci_start_request(host, slot); | |
973 | } else { | |
974 | dev_vdbg(&host->pdev->dev, "list empty\n"); | |
975 | host->state = STATE_IDLE; | |
976 | } | |
977 | ||
978 | spin_unlock(&host->lock); | |
979 | mmc_request_done(prev_mmc, mrq); | |
980 | spin_lock(&host->lock); | |
981 | } | |
982 | ||
7d2be074 | 983 | static void atmci_command_complete(struct atmel_mci *host, |
c06ad258 | 984 | struct mmc_command *cmd) |
7d2be074 | 985 | { |
c06ad258 HS |
986 | u32 status = host->cmd_status; |
987 | ||
7d2be074 HS |
988 | /* Read the response from the card (up to 16 bytes) */ |
989 | cmd->resp[0] = mci_readl(host, RSPR); | |
990 | cmd->resp[1] = mci_readl(host, RSPR); | |
991 | cmd->resp[2] = mci_readl(host, RSPR); | |
992 | cmd->resp[3] = mci_readl(host, RSPR); | |
993 | ||
994 | if (status & MCI_RTOE) | |
995 | cmd->error = -ETIMEDOUT; | |
996 | else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE)) | |
997 | cmd->error = -EILSEQ; | |
998 | else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE)) | |
999 | cmd->error = -EIO; | |
1000 | else | |
1001 | cmd->error = 0; | |
1002 | ||
1003 | if (cmd->error) { | |
965ebf33 | 1004 | dev_dbg(&host->pdev->dev, |
7d2be074 HS |
1005 | "command error: status=0x%08x\n", status); |
1006 | ||
1007 | if (cmd->data) { | |
1008 | host->data = NULL; | |
65e8b083 | 1009 | atmci_stop_dma(host); |
7d2be074 HS |
1010 | mci_writel(host, IDR, MCI_NOTBUSY |
1011 | | MCI_TXRDY | MCI_RXRDY | |
1012 | | ATMCI_DATA_ERROR_FLAGS); | |
1013 | } | |
1014 | } | |
1015 | } | |
1016 | ||
/*
 * Debounce timer callback for the card-detect GPIO (armed by
 * atmci_detect_interrupt()). Re-enables the detect IRQ, and if the
 * presence state actually changed, tears down any request in flight
 * for this slot with -ENOMEDIUM before notifying the MMC core.
 *
 * @data: the struct atmel_mci_slot *, cast through unsigned long
 *        (classic timer-callback convention).
 */
static void atmci_detect_change(unsigned long data)
{
	struct atmel_mci_slot	*slot = (struct atmel_mci_slot *)data;
	bool			present;
	bool			present_old;

	/*
	 * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before
	 * freeing the interrupt. We must not re-enable the interrupt
	 * if it has been freed, and if we're shutting down, it
	 * doesn't really matter whether the card is present or not.
	 */
	smp_rmb();
	if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
		return;

	enable_irq(gpio_to_irq(slot->detect_pin));
	present = !gpio_get_value(slot->detect_pin);	/* active-low pin */
	present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);

	dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
			present, present_old);

	if (present != present_old) {
		struct atmel_mci	*host = slot->host;
		struct mmc_request	*mrq;

		dev_dbg(&slot->mmc->class_dev, "card %s\n",
			present ? "inserted" : "removed");

		spin_lock(&host->lock);

		if (!present)
			clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
		else
			set_bit(ATMCI_CARD_PRESENT, &slot->flags);

		/* Clean up queue if present */
		mrq = slot->mrq;
		if (mrq) {
			if (mrq == host->mrq) {
				/*
				 * Reset controller to terminate any ongoing
				 * commands or data transfers.
				 */
				mci_writel(host, CR, MCI_CR_SWRST);
				mci_writel(host, CR, MCI_CR_MCIEN);
				mci_writel(host, MR, host->mode_reg);

				host->data = NULL;
				host->cmd = NULL;

				/*
				 * Fail whatever stage the state machine was
				 * in; fallthroughs cascade errors into the
				 * later stages of the same request.
				 */
				switch (host->state) {
				case STATE_IDLE:
					break;
				case STATE_SENDING_CMD:
					mrq->cmd->error = -ENOMEDIUM;
					if (!mrq->data)
						break;
					/* fall through */
				case STATE_SENDING_DATA:
					mrq->data->error = -ENOMEDIUM;
					atmci_stop_dma(host);
					break;
				case STATE_DATA_BUSY:
				case STATE_DATA_ERROR:
					if (mrq->data->error == -EINPROGRESS)
						mrq->data->error = -ENOMEDIUM;
					if (!mrq->stop)
						break;
					/* fall through */
				case STATE_SENDING_STOP:
					mrq->stop->error = -ENOMEDIUM;
					break;
				}

				atmci_request_end(host, mrq);
			} else {
				/* Request still queued: fail it directly. */
				list_del(&slot->queue_node);
				mrq->cmd->error = -ENOMEDIUM;
				if (mrq->data)
					mrq->data->error = -ENOMEDIUM;
				if (mrq->stop)
					mrq->stop->error = -ENOMEDIUM;

				spin_unlock(&host->lock);
				mmc_request_done(slot->mmc, mrq);
				spin_lock(&host->lock);
			}
		}
		spin_unlock(&host->lock);

		mmc_detect_change(slot->mmc, 0);
	}
}
1112 | ||
/*
 * Bottom half of the driver: drives the request state machine based on
 * the pending-event bits set by the interrupt handler. Runs with
 * host->lock held; loops until no state transition is made. The switch
 * cases use deliberate fallthrough so that a single pass can advance a
 * request through several consecutive stages.
 *
 * @priv: the struct atmel_mci *, cast through unsigned long.
 */
static void atmci_tasklet_func(unsigned long priv)
{
	struct atmel_mci	*host = (struct atmel_mci *)priv;
	struct mmc_request	*mrq = host->mrq;
	struct mmc_data		*data = host->data;
	struct mmc_command	*cmd = host->cmd;
	enum atmel_mci_state	state = host->state;
	enum atmel_mci_state	prev_state;
	u32			status;

	spin_lock(&host->lock);

	/* Re-sample under the lock; the init above may be stale. */
	state = host->state;

	dev_vdbg(&host->pdev->dev,
		"tasklet: state %u pending/completed/mask %lx/%lx/%x\n",
		state, host->pending_events, host->completed_events,
		mci_readl(host, IMR));

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!atmci_test_and_clear_pending(host,
						EVENT_CMD_COMPLETE))
				break;

			host->cmd = NULL;
			atmci_set_completed(host, EVENT_CMD_COMPLETE);
			atmci_command_complete(host, mrq->cmd);
			if (!mrq->data || cmd->error) {
				/* No data phase (or cmd failed): done. */
				atmci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (atmci_test_and_clear_pending(host,
						EVENT_DATA_ERROR)) {
				atmci_stop_dma(host);
				if (data->stop)
					send_stop_cmd(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!atmci_test_and_clear_pending(host,
						EVENT_XFER_COMPLETE))
				break;

			atmci_set_completed(host, EVENT_XFER_COMPLETE);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!atmci_test_and_clear_pending(host,
						EVENT_DATA_COMPLETE))
				break;

			host->data = NULL;
			atmci_set_completed(host, EVENT_DATA_COMPLETE);
			/* data_status was latched by the IRQ handler. */
			status = host->data_status;
			if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
				if (status & MCI_DTOE) {
					dev_dbg(&host->pdev->dev,
							"data timeout error\n");
					data->error = -ETIMEDOUT;
				} else if (status & MCI_DCRCE) {
					dev_dbg(&host->pdev->dev,
							"data CRC error\n");
					data->error = -EILSEQ;
				} else {
					dev_dbg(&host->pdev->dev,
						"data FIFO error (status=%08x)\n",
						status);
					data->error = -EIO;
				}
			} else {
				data->bytes_xfered = data->blocks * data->blksz;
				data->error = 0;
			}

			if (!data->stop) {
				atmci_request_end(host, host->mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_STOP;
			if (!data->error)
				send_stop_cmd(host, data);
			/* fall through */

		case STATE_SENDING_STOP:
			if (!atmci_test_and_clear_pending(host,
						EVENT_CMD_COMPLETE))
				break;

			host->cmd = NULL;
			atmci_command_complete(host, mrq->stop);
			atmci_request_end(host, host->mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the transfer to wind down before
			 * treating the data phase as finished. */
			if (!atmci_test_and_clear_pending(host,
						EVENT_XFER_COMPLETE))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;

unlock:
	spin_unlock(&host->lock);
}
1236 | ||
/*
 * PIO read path: drain 32-bit words from the RDR FIFO into the current
 * scatterlist while RXRDY stays set. Handles words that straddle a
 * scatterlist entry boundary by splitting the word across two memcpys.
 * On a data error, latches the status, masks the relevant interrupts
 * and defers to the tasklet; when the scatterlist is exhausted, arms
 * NOTBUSY and signals EVENT_XFER_COMPLETE.
 */
static void atmci_read_data_pio(struct atmel_mci *host)
{
	struct scatterlist	*sg = host->sg;
	void			*buf = sg_virt(sg);
	unsigned int		offset = host->pio_offset;
	struct mmc_data		*data = host->data;
	u32			value;
	u32			status;
	unsigned int		nbytes = 0;

	do {
		value = mci_readl(host, RDR);
		if (likely(offset + 4 <= sg->length)) {
			/* Whole word fits in the current sg entry. */
			put_unaligned(value, (u32 *)(buf + offset));

			offset += 4;
			nbytes += 4;

			if (offset == sg->length) {
				host->sg = sg = sg_next(sg);
				if (!sg)
					goto done;

				offset = 0;
				buf = sg_virt(sg);
			}
		} else {
			/* Word straddles the entry: split the copy. */
			unsigned int remaining = sg->length - offset;
			memcpy(buf + offset, &value, remaining);
			nbytes += remaining;

			flush_dcache_page(sg_page(sg));
			host->sg = sg = sg_next(sg);
			if (!sg)
				goto done;

			offset = 4 - remaining;
			buf = sg_virt(sg);
			memcpy(buf, (u8 *)&value + remaining, offset);
			nbytes += offset;
		}

		status = mci_readl(host, SR);
		if (status & ATMCI_DATA_ERROR_FLAGS) {
			mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY
						| ATMCI_DATA_ERROR_FLAGS));
			host->data_status = status;
			data->bytes_xfered += nbytes;
			/* Publish status/progress before setting the event. */
			smp_wmb();
			atmci_set_pending(host, EVENT_DATA_ERROR);
			tasklet_schedule(&host->tasklet);
			return;
		}
	} while (status & MCI_RXRDY);

	host->pio_offset = offset;
	data->bytes_xfered += nbytes;

	return;

done:
	mci_writel(host, IDR, MCI_RXRDY);
	mci_writel(host, IER, MCI_NOTBUSY);
	data->bytes_xfered += nbytes;
	smp_wmb();
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
1304 | ||
/*
 * PIO write path: mirror of atmci_read_data_pio(). Feeds 32-bit words
 * from the scatterlist into the TDR FIFO while TXRDY stays set,
 * assembling a word from two sg entries when it straddles a boundary.
 * Error and completion signalling follow the same pattern as the read
 * path (latch status, mask IRQs, hand off to the tasklet).
 */
static void atmci_write_data_pio(struct atmel_mci *host)
{
	struct scatterlist	*sg = host->sg;
	void			*buf = sg_virt(sg);
	unsigned int		offset = host->pio_offset;
	struct mmc_data		*data = host->data;
	u32			value;
	u32			status;
	unsigned int		nbytes = 0;

	do {
		if (likely(offset + 4 <= sg->length)) {
			/* Whole word available in the current sg entry. */
			value = get_unaligned((u32 *)(buf + offset));
			mci_writel(host, TDR, value);

			offset += 4;
			nbytes += 4;
			if (offset == sg->length) {
				host->sg = sg = sg_next(sg);
				if (!sg)
					goto done;

				offset = 0;
				buf = sg_virt(sg);
			}
		} else {
			/* Assemble the word from the tail of this entry
			 * plus the head of the next one. */
			unsigned int remaining = sg->length - offset;

			value = 0;
			memcpy(&value, buf + offset, remaining);
			nbytes += remaining;

			host->sg = sg = sg_next(sg);
			if (!sg) {
				/* Last (partial) word of the transfer. */
				mci_writel(host, TDR, value);
				goto done;
			}

			offset = 4 - remaining;
			buf = sg_virt(sg);
			memcpy((u8 *)&value + remaining, buf, offset);
			mci_writel(host, TDR, value);
			nbytes += offset;
		}

		status = mci_readl(host, SR);
		if (status & ATMCI_DATA_ERROR_FLAGS) {
			mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY
						| ATMCI_DATA_ERROR_FLAGS));
			host->data_status = status;
			data->bytes_xfered += nbytes;
			/* Publish status/progress before setting the event. */
			smp_wmb();
			atmci_set_pending(host, EVENT_DATA_ERROR);
			tasklet_schedule(&host->tasklet);
			return;
		}
	} while (status & MCI_TXRDY);

	host->pio_offset = offset;
	data->bytes_xfered += nbytes;

	return;

done:
	mci_writel(host, IDR, MCI_TXRDY);
	mci_writel(host, IER, MCI_NOTBUSY);
	data->bytes_xfered += nbytes;
	smp_wmb();
	atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
1375 | ||
965ebf33 | 1376 | static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) |
7d2be074 | 1377 | { |
7d2be074 HS |
1378 | mci_writel(host, IDR, MCI_CMDRDY); |
1379 | ||
c06ad258 | 1380 | host->cmd_status = status; |
965ebf33 | 1381 | smp_wmb(); |
c06ad258 | 1382 | atmci_set_pending(host, EVENT_CMD_COMPLETE); |
7d2be074 HS |
1383 | tasklet_schedule(&host->tasklet); |
1384 | } | |
1385 | ||
/*
 * Top-half interrupt handler. Loops up to 6 passes over SR & IMR,
 * dispatching data errors, NOTBUSY completion, PIO RX/TX service and
 * command completion. Heavy lifting is deferred to the tasklet.
 * Returns IRQ_NONE only if nothing was pending on the very first pass
 * (pass_count is still 0 after the early break).
 */
static irqreturn_t atmci_interrupt(int irq, void *dev_id)
{
	struct atmel_mci	*host = dev_id;
	u32			status, mask, pending;
	unsigned int		pass_count = 0;

	do {
		status = mci_readl(host, SR);
		mask = mci_readl(host, IMR);
		pending = status & mask;
		if (!pending)
			break;

		if (pending & ATMCI_DATA_ERROR_FLAGS) {
			mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS
					| MCI_RXRDY | MCI_TXRDY);
			/* Drop bits we just masked from this pass. */
			pending &= mci_readl(host, IMR);

			host->data_status = status;
			smp_wmb();
			atmci_set_pending(host, EVENT_DATA_ERROR);
			tasklet_schedule(&host->tasklet);
		}
		if (pending & MCI_NOTBUSY) {
			mci_writel(host, IDR,
					ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY);
			/* Keep an earlier error status if one was latched. */
			if (!host->data_status)
				host->data_status = status;
			smp_wmb();
			atmci_set_pending(host, EVENT_DATA_COMPLETE);
			tasklet_schedule(&host->tasklet);
		}
		if (pending & MCI_RXRDY)
			atmci_read_data_pio(host);
		if (pending & MCI_TXRDY)
			atmci_write_data_pio(host);

		if (pending & MCI_CMDRDY)
			atmci_cmd_interrupt(host, status);
	} while (pass_count++ < 5);

	return pass_count ? IRQ_HANDLED : IRQ_NONE;
}
1429 | ||
/*
 * Card-detect GPIO interrupt: debounce by disabling the IRQ and
 * arming a 20 ms timer; atmci_detect_change() re-enables the IRQ
 * and evaluates the stabilized pin state.
 */
static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
{
	struct atmel_mci_slot	*slot = dev_id;

	/*
	 * Disable interrupts until the pin has stabilized and check
	 * the state then. Use mod_timer() since we may be in the
	 * middle of the timer routine when this interrupt triggers.
	 */
	disable_irq_nosync(irq);
	mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));

	return IRQ_HANDLED;
}
1444 | ||
65e8b083 HS |
#ifdef CONFIG_MMC_ATMELMCI_DMA

/* Map a dma_client embedded in struct atmel_mci back to its host. */
static inline struct atmel_mci *
dma_client_to_atmel_mci(struct dma_client *client)
{
	return container_of(client, struct atmel_mci, dma.client);
}

/*
 * dmaengine client callback (old dma_client API): claim the first
 * channel offered (ACK it) and release ours if it goes away, falling
 * back to PIO. host->lock protects the single dma.chan slot against
 * concurrent offers/removals.
 */
static enum dma_state_client atmci_dma_event(struct dma_client *client,
		struct dma_chan *chan, enum dma_state state)
{
	struct atmel_mci	*host;
	enum dma_state_client	ret = DMA_NAK;

	host = dma_client_to_atmel_mci(client);

	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		spin_lock_bh(&host->lock);
		if (!host->dma.chan) {
			/* First channel offered: take it. */
			host->dma.chan = chan;
			ret = DMA_ACK;
		}
		spin_unlock_bh(&host->lock);

		if (ret == DMA_ACK)
			dev_info(&host->pdev->dev,
					"Using %s for DMA transfers\n",
					chan->dev.bus_id);
		break;

	case DMA_RESOURCE_REMOVED:
		spin_lock_bh(&host->lock);
		if (host->dma.chan == chan) {
			host->dma.chan = NULL;
			ret = DMA_ACK;
		}
		spin_unlock_bh(&host->lock);

		if (ret == DMA_ACK)
			dev_info(&host->pdev->dev,
					"Lost %s, falling back to PIO\n",
					chan->dev.bus_id);
		break;

	default:
		break;
	}


	return ret;
}
#endif /* CONFIG_MMC_ATMELMCI_DMA */
1498 | ||
965ebf33 HS |
1499 | static int __init atmci_init_slot(struct atmel_mci *host, |
1500 | struct mci_slot_pdata *slot_data, unsigned int id, | |
1501 | u32 sdc_reg) | |
1502 | { | |
1503 | struct mmc_host *mmc; | |
1504 | struct atmel_mci_slot *slot; | |
1505 | ||
1506 | mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); | |
1507 | if (!mmc) | |
1508 | return -ENOMEM; | |
1509 | ||
1510 | slot = mmc_priv(mmc); | |
1511 | slot->mmc = mmc; | |
1512 | slot->host = host; | |
1513 | slot->detect_pin = slot_data->detect_pin; | |
1514 | slot->wp_pin = slot_data->wp_pin; | |
1515 | slot->sdc_reg = sdc_reg; | |
1516 | ||
1517 | mmc->ops = &atmci_ops; | |
1518 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); | |
1519 | mmc->f_max = host->bus_hz / 2; | |
1520 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | |
1521 | if (slot_data->bus_width >= 4) | |
1522 | mmc->caps |= MMC_CAP_4_BIT_DATA; | |
1523 | ||
1524 | mmc->max_hw_segs = 64; | |
1525 | mmc->max_phys_segs = 64; | |
1526 | mmc->max_req_size = 32768 * 512; | |
1527 | mmc->max_blk_size = 32768; | |
1528 | mmc->max_blk_count = 512; | |
1529 | ||
1530 | /* Assume card is present initially */ | |
1531 | set_bit(ATMCI_CARD_PRESENT, &slot->flags); | |
1532 | if (gpio_is_valid(slot->detect_pin)) { | |
1533 | if (gpio_request(slot->detect_pin, "mmc_detect")) { | |
1534 | dev_dbg(&mmc->class_dev, "no detect pin available\n"); | |
1535 | slot->detect_pin = -EBUSY; | |
1536 | } else if (gpio_get_value(slot->detect_pin)) { | |
1537 | clear_bit(ATMCI_CARD_PRESENT, &slot->flags); | |
1538 | } | |
1539 | } | |
1540 | ||
1541 | if (!gpio_is_valid(slot->detect_pin)) | |
1542 | mmc->caps |= MMC_CAP_NEEDS_POLL; | |
1543 | ||
1544 | if (gpio_is_valid(slot->wp_pin)) { | |
1545 | if (gpio_request(slot->wp_pin, "mmc_wp")) { | |
1546 | dev_dbg(&mmc->class_dev, "no WP pin available\n"); | |
1547 | slot->wp_pin = -EBUSY; | |
1548 | } | |
1549 | } | |
1550 | ||
1551 | host->slot[id] = slot; | |
1552 | mmc_add_host(mmc); | |
1553 | ||
1554 | if (gpio_is_valid(slot->detect_pin)) { | |
1555 | int ret; | |
1556 | ||
1557 | setup_timer(&slot->detect_timer, atmci_detect_change, | |
1558 | (unsigned long)slot); | |
1559 | ||
1560 | ret = request_irq(gpio_to_irq(slot->detect_pin), | |
1561 | atmci_detect_interrupt, | |
1562 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | |
1563 | "mmc-detect", slot); | |
1564 | if (ret) { | |
1565 | dev_dbg(&mmc->class_dev, | |
1566 | "could not request IRQ %d for detect pin\n", | |
1567 | gpio_to_irq(slot->detect_pin)); | |
1568 | gpio_free(slot->detect_pin); | |
1569 | slot->detect_pin = -EBUSY; | |
1570 | } | |
1571 | } | |
1572 | ||
1573 | atmci_init_debugfs(slot); | |
1574 | ||
1575 | return 0; | |
1576 | } | |
1577 | ||
/*
 * Unregister and free one slot. Sets ATMCI_SHUTDOWN (with a write
 * barrier) before freeing the detect IRQ so atmci_detect_change()
 * won't re-enable an IRQ that is about to be freed.
 */
static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
		unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */

	set_bit(ATMCI_SHUTDOWN, &slot->flags);
	smp_wmb();

	mmc_remove_host(slot->mmc);

	if (gpio_is_valid(slot->detect_pin)) {
		int pin = slot->detect_pin;

		free_irq(gpio_to_irq(pin), slot);
		del_timer_sync(&slot->detect_timer);
		gpio_free(pin);
	}
	if (gpio_is_valid(slot->wp_pin))
		gpio_free(slot->wp_pin);

	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
1601 | ||
7d2be074 HS |
1602 | static int __init atmci_probe(struct platform_device *pdev) |
1603 | { | |
1604 | struct mci_platform_data *pdata; | |
965ebf33 HS |
1605 | struct atmel_mci *host; |
1606 | struct resource *regs; | |
1607 | unsigned int nr_slots; | |
1608 | int irq; | |
1609 | int ret; | |
7d2be074 HS |
1610 | |
1611 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1612 | if (!regs) | |
1613 | return -ENXIO; | |
1614 | pdata = pdev->dev.platform_data; | |
1615 | if (!pdata) | |
1616 | return -ENXIO; | |
1617 | irq = platform_get_irq(pdev, 0); | |
1618 | if (irq < 0) | |
1619 | return irq; | |
1620 | ||
965ebf33 HS |
1621 | host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL); |
1622 | if (!host) | |
7d2be074 HS |
1623 | return -ENOMEM; |
1624 | ||
7d2be074 | 1625 | host->pdev = pdev; |
965ebf33 HS |
1626 | spin_lock_init(&host->lock); |
1627 | INIT_LIST_HEAD(&host->queue); | |
7d2be074 HS |
1628 | |
1629 | host->mck = clk_get(&pdev->dev, "mci_clk"); | |
1630 | if (IS_ERR(host->mck)) { | |
1631 | ret = PTR_ERR(host->mck); | |
1632 | goto err_clk_get; | |
1633 | } | |
1634 | ||
1635 | ret = -ENOMEM; | |
1636 | host->regs = ioremap(regs->start, regs->end - regs->start + 1); | |
1637 | if (!host->regs) | |
1638 | goto err_ioremap; | |
1639 | ||
1640 | clk_enable(host->mck); | |
1641 | mci_writel(host, CR, MCI_CR_SWRST); | |
1642 | host->bus_hz = clk_get_rate(host->mck); | |
1643 | clk_disable(host->mck); | |
1644 | ||
1645 | host->mapbase = regs->start; | |
1646 | ||
965ebf33 | 1647 | tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host); |
7d2be074 | 1648 | |
965ebf33 | 1649 | ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, host); |
7d2be074 HS |
1650 | if (ret) |
1651 | goto err_request_irq; | |
1652 | ||
65e8b083 HS |
1653 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1654 | if (pdata->dma_slave) { | |
1655 | struct dma_slave *slave = pdata->dma_slave; | |
1656 | ||
1657 | slave->tx_reg = regs->start + MCI_TDR; | |
1658 | slave->rx_reg = regs->start + MCI_RDR; | |
1659 | ||
1660 | /* Try to grab a DMA channel */ | |
1661 | host->dma.client.event_callback = atmci_dma_event; | |
1662 | dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask); | |
1663 | host->dma.client.slave = slave; | |
1664 | ||
1665 | dma_async_client_register(&host->dma.client); | |
1666 | dma_async_client_chan_request(&host->dma.client); | |
1667 | } else { | |
1668 | dev_notice(&pdev->dev, "DMA not available, using PIO\n"); | |
1669 | } | |
1670 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ | |
1671 | ||
7d2be074 HS |
1672 | platform_set_drvdata(pdev, host); |
1673 | ||
965ebf33 HS |
1674 | /* We need at least one slot to succeed */ |
1675 | nr_slots = 0; | |
1676 | ret = -ENODEV; | |
1677 | if (pdata->slot[0].bus_width) { | |
1678 | ret = atmci_init_slot(host, &pdata->slot[0], | |
1679 | MCI_SDCSEL_SLOT_A, 0); | |
1680 | if (!ret) | |
1681 | nr_slots++; | |
1682 | } | |
1683 | if (pdata->slot[1].bus_width) { | |
1684 | ret = atmci_init_slot(host, &pdata->slot[1], | |
1685 | MCI_SDCSEL_SLOT_B, 1); | |
1686 | if (!ret) | |
1687 | nr_slots++; | |
7d2be074 HS |
1688 | } |
1689 | ||
965ebf33 HS |
1690 | if (!nr_slots) |
1691 | goto err_init_slot; | |
7d2be074 | 1692 | |
965ebf33 HS |
1693 | dev_info(&pdev->dev, |
1694 | "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", | |
1695 | host->mapbase, irq, nr_slots); | |
deec9ae3 | 1696 | |
7d2be074 HS |
1697 | return 0; |
1698 | ||
965ebf33 | 1699 | err_init_slot: |
65e8b083 HS |
1700 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1701 | if (pdata->dma_slave) | |
1702 | dma_async_client_unregister(&host->dma.client); | |
1703 | #endif | |
965ebf33 | 1704 | free_irq(irq, host); |
7d2be074 HS |
1705 | err_request_irq: |
1706 | iounmap(host->regs); | |
1707 | err_ioremap: | |
1708 | clk_put(host->mck); | |
1709 | err_clk_get: | |
965ebf33 | 1710 | kfree(host); |
7d2be074 HS |
1711 | return ret; |
1712 | } | |
1713 | ||
/*
 * Platform remove: tear down all slots, quiesce the controller
 * (mask all IRQs, disable it, flush with a status read), unregister
 * the DMA client if one was registered, and release every resource
 * acquired in probe, in reverse order.
 */
static int __exit atmci_remove(struct platform_device *pdev)
{
	struct atmel_mci	*host = platform_get_drvdata(pdev);
	unsigned int		i;

	platform_set_drvdata(pdev, NULL);

	for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
		if (host->slot[i])
			atmci_cleanup_slot(host->slot[i], i);
	}

	clk_enable(host->mck);
	mci_writel(host, IDR, ~0UL);
	mci_writel(host, CR, MCI_CR_MCIDIS);
	mci_readl(host, SR);
	clk_disable(host->mck);

#ifdef CONFIG_MMC_ATMELMCI_DMA
	if (host->dma.client.slave)
		dma_async_client_unregister(&host->dma.client);
#endif

	free_irq(platform_get_irq(pdev, 0), host);
	iounmap(host->regs);

	clk_put(host->mck);
	kfree(host);

	return 0;
}
1745 | ||
/*
 * No .probe here: the driver is registered via platform_driver_probe()
 * in atmci_init() because atmci_probe() is __init.
 */
static struct platform_driver atmci_driver = {
	.remove		= __exit_p(atmci_remove),
	.driver		= {
		.name		= "atmel_mci",
	},
};
1752 | ||
/* Module entry: register and probe in one step (non-hotpluggable). */
static int __init atmci_init(void)
{
	return platform_driver_probe(&atmci_driver, atmci_probe);
}
1757 | ||
/* Module exit: unregister the platform driver. */
static void __exit atmci_exit(void)
{
	platform_driver_unregister(&atmci_driver);
}
1762 | ||
module_init(atmci_init);
module_exit(atmci_exit);

/* Module metadata. */
MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
MODULE_LICENSE("GPL v2");