/*
 * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
 * ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012-2017 Cavium Inc.
 * Authors:
 *   David Daney <david.daney@cavium.com>
 *   Peter Swain <pswain@cavium.com>
 *   Steven J. Hill <steven.hill@cavium.com>
 *   Jan Glauber <jglauber@cavium.com>
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>

#include "cavium.h"

const char *cvm_mmc_irq_names[] = {
	"MMC Buffer",
	"MMC Command",
	"MMC DMA",
	"MMC Command Error",
	"MMC DMA Error",
	"MMC Switch",
	"MMC Switch Error",
	"MMC DMA int Fifo",
	"MMC DMA int",
};

/*
 * The Cavium MMC host hardware assumes that all commands have fixed
 * command and response types. These are correct if MMC devices are
 * being used. However, non-MMC devices like SD use command and
 * response types that are unexpected by the host hardware.
 *
 * The command and response types can be overridden by supplying an
 * XOR value that is applied to the type. We calculate the XOR value
 * from the values in this table and the flags passed from the MMC
 * core.
 */
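/*
 * For instance, CMD8 on MMC is SEND_EXT_CSD (a data-read command with an
 * R1 response), which is what the {1, 1} entry below encodes. The same
 * opcode on SD is SEND_IF_COND, a broadcast command with an R7 response,
 * so the flags from the core yield ctype_xor = 0 ^ 1 = 1 to correct the
 * command type and rtype_xor = 1 ^ 1 = 0 to keep the response type.
 */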
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
	{0, 0},		/* CMD0 */
	{0, 3},		/* CMD1 */
	{0, 2},		/* CMD2 */
	{0, 1},		/* CMD3 */
	{0, 0},		/* CMD4 */
	{0, 1},		/* CMD5 */
	{0, 1},		/* CMD6 */
	{0, 1},		/* CMD7 */
	{1, 1},		/* CMD8 */
	{0, 2},		/* CMD9 */
	{0, 2},		/* CMD10 */
	{1, 1},		/* CMD11 */
	{0, 1},		/* CMD12 */
	{0, 1},		/* CMD13 */
	{1, 1},		/* CMD14 */
	{0, 0},		/* CMD15 */
	{0, 1},		/* CMD16 */
	{1, 1},		/* CMD17 */
	{1, 1},		/* CMD18 */
	{3, 1},		/* CMD19 */
	{2, 1},		/* CMD20 */
	{0, 0},		/* CMD21 */
	{0, 0},		/* CMD22 */
	{0, 1},		/* CMD23 */
	{2, 1},		/* CMD24 */
	{2, 1},		/* CMD25 */
	{2, 1},		/* CMD26 */
	{2, 1},		/* CMD27 */
	{0, 1},		/* CMD28 */
	{0, 1},		/* CMD29 */
	{1, 1},		/* CMD30 */
	{1, 1},		/* CMD31 */
	{0, 0},		/* CMD32 */
	{0, 0},		/* CMD33 */
	{0, 0},		/* CMD34 */
	{0, 1},		/* CMD35 */
	{0, 1},		/* CMD36 */
	{0, 0},		/* CMD37 */
	{0, 1},		/* CMD38 */
	{0, 4},		/* CMD39 */
	{0, 5},		/* CMD40 */
	{0, 0},		/* CMD41 */
	{2, 1},		/* CMD42 */
	{0, 0},		/* CMD43 */
	{0, 0},		/* CMD44 */
	{0, 0},		/* CMD45 */
	{0, 0},		/* CMD46 */
	{0, 0},		/* CMD47 */
	{0, 0},		/* CMD48 */
	{0, 0},		/* CMD49 */
	{0, 0},		/* CMD50 */
	{0, 0},		/* CMD51 */
	{0, 0},		/* CMD52 */
	{0, 0},		/* CMD53 */
	{0, 0},		/* CMD54 */
	{0, 1},		/* CMD55 */
	{0xff, 0xff},	/* CMD56 */
	{0, 0},		/* CMD57 */
	{0, 0},		/* CMD58 */
	{0, 0},		/* CMD59 */
	{0, 0},		/* CMD60 */
	{0, 0},		/* CMD61 */
	{0, 0},		/* CMD62 */
	{0, 0}		/* CMD63 */
};

static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
	struct cvm_mmc_cr_type *cr;
	u8 hardware_ctype, hardware_rtype;
	u8 desired_ctype = 0, desired_rtype = 0;
	struct cvm_mmc_cr_mods r;

	cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
	hardware_ctype = cr->ctype;
	hardware_rtype = cr->rtype;
	if (cmd->opcode == MMC_GEN_CMD)
		hardware_ctype = (cmd->arg & 1) ? 1 : 2;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_ADTC:
		desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
		break;
	case MMC_CMD_AC:
	case MMC_CMD_BC:
	case MMC_CMD_BCR:
		desired_ctype = 0;
		break;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		desired_rtype = 0;
		break;
	case MMC_RSP_R1:	/* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
	case MMC_RSP_R1B:
		desired_rtype = 1;
		break;
	case MMC_RSP_R2:
		desired_rtype = 2;
		break;
	case MMC_RSP_R3:	/* MMC_RSP_R4 */
		desired_rtype = 3;
		break;
	}
	r.ctype_xor = desired_ctype ^ hardware_ctype;
	r.rtype_xor = desired_rtype ^ hardware_rtype;
	return r;
}

static void check_switch_errors(struct cvm_mmc_host *host)
{
	u64 emm_switch;

	emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
	if (emm_switch & MIO_EMM_SWITCH_ERR0)
		dev_err(host->dev, "Switch power class error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR1)
		dev_err(host->dev, "Switch hs timing error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR2)
		dev_err(host->dev, "Switch bus width error\n");
}

static void clear_bus_id(u64 *reg)
{
	u64 bus_id_mask = GENMASK_ULL(61, 60);

	*reg &= ~bus_id_mask;
}

static void set_bus_id(u64 *reg, int bus_id)
{
	clear_bus_id(reg);
	*reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}

static int get_bus_id(u64 reg)
{
	return FIELD_GET(GENMASK_ULL(61, 60), reg);
}

/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
	int retries = 100;
	u64 rsp_sts;
	int bus_id;

	/*
	 * Mode settings are only taken from slot 0. Work around that
	 * hardware issue by first switching to slot 0.
	 */
	bus_id = get_bus_id(emm_switch);
	clear_bus_id(&emm_switch);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	set_bus_id(&emm_switch, bus_id);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	/* wait for the switch to finish */
	do {
		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
			break;
		udelay(10);
	} while (--retries);

	check_switch_errors(host);
}

static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
	/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
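	/*
	 * That is, bits 61:60 (BUS_ID), bit 48 (HS_TIMING), bits 42:40
	 * (BUS_WIDTH), bits 35:32 (POWER_CLASS), bits 31:16 (CLK_HI) and
	 * bits 15:0 (CLK_LO), assuming the field layout from cavium.h.
	 */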
	u64 match = 0x3001070fffffffffull;

	return (slot->cached_switch & match) != (new_val & match);
}

static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
	u64 timeout;

	if (!slot->clock)
		return;

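	/*
	 * The watchdog counts bus-clock cycles: either the requested timeout
	 * converted from nanoseconds, or a default of roughly 850 ms.
	 */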
	if (ns)
		timeout = (slot->clock * ns) / NSEC_PER_SEC;
	else
		timeout = (slot->clock * 850ull) / 1000ull;
	writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}

static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch, wdog;

	emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
	emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
			MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
	set_bus_id(&emm_switch, slot->bus_id);

	wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
	do_switch(slot->host, emm_switch);

	slot->cached_switch = emm_switch;

	msleep(20);

	writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}

/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_sample, emm_switch;

	if (slot->bus_id == host->last_slot)
		return;

	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}

static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
		    u64 dbuf)
{
	struct sg_mapping_iter *smi = &host->smi;
	int data_len = req->data->blocks * req->data->blksz;
	int bytes_xfered, shift = -1;
	u64 dat = 0;

	/* Auto inc from offset zero */
	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

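	/*
	 * The controller's data buffer is read 64 bits at a time; bytes are
	 * unpacked most-significant first into the scatterlist.
	 */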
	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		if (shift < 0) {
			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}
	}

	sg_miter_stop(smi);
	req->data->bytes_xfered = bytes_xfered;
	req->data->error = 0;
}

static void do_write(struct mmc_request *req)
{
	req->data->bytes_xfered = req->data->blocks * req->data->blksz;
	req->data->error = 0;
}

static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
			     u64 rsp_sts)
{
	u64 rsp_hi, rsp_lo;

	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
		return;

	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
	case 1:
	case 3:
		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
		req->cmd->resp[1] = 0;
		req->cmd->resp[2] = 0;
		req->cmd->resp[3] = 0;
		break;
	case 2:
		req->cmd->resp[3] = rsp_lo & 0xffffffff;
		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
		req->cmd->resp[1] = rsp_hi & 0xffffffff;
		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
		break;
	}
}

static int get_dma_dir(struct mmc_data *data)
{
	return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;
	return 1;
}

static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	return finish_dma_single(host, data);
}

static int check_status(u64 rsp_sts)
{
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
	    rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
		return -EILSEQ;
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
		return -ETIMEDOUT;
	if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
		return -EIO;
	return 0;
}

/* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
	u64 emm_dma;

	emm_dma = readq(host->base + MIO_EMM_DMA(host));
	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
	set_bus_id(&emm_dma, get_bus_id(rsp_sts));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}

irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	unsigned long flags = 0;
	u64 emm_int, rsp_sts;
	bool host_done;

	if (host->need_irq_handler_lock)
		spin_lock_irqsave(&host->irq_handler_lock, flags);
	else
		__acquire(&host->irq_handler_lock);

	/* Clear interrupt bits (write 1 clears). */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/*
	 * dma_val set means DMA is still in progress. Don't touch
	 * the request and wait for the interrupt indicating that
	 * the DMA is finished.
	 */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
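		/*
		 * RSP_STS bits 8:7 give the data direction of the command
		 * that just completed: 1 = read transfer, 2 = write transfer.
		 */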
		unsigned int type = (rsp_sts >> 7) & 3;

		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);
	}

	host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
		    emm_int & MIO_EMM_INT_DMA_DONE ||
		    emm_int & MIO_EMM_INT_CMD_ERR ||
		    emm_int & MIO_EMM_INT_DMA_ERR;

	if (!(host_done && req->done))
		goto no_req_done;

	req->cmd->error = check_status(rsp_sts);

	if (host->dma_active && req->data)
		if (!finish_dma(host, req->data))
			goto no_req_done;

	set_cmd_response(host, req, rsp_sts);
	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
		cleanup_dma(host, rsp_sts);

	host->current_req = NULL;
	req->done(req);

no_req_done:
	if (host->dmar_fixup_done)
		host->dmar_fixup_done(host);
	if (host_done)
		host->release_bus(host);
out:
	if (host->need_irq_handler_lock)
		spin_unlock_irqrestore(&host->irq_handler_lock, flags);
	else
		__release(&host->irq_handler_lock);
	return IRQ_RETVAL(emm_int != 0);
}

/*
 * Program DMA_CFG and if needed DMA_ADR.
 * Returns 0 on error, DMA address otherwise.
 */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 dma_cfg, addr;
	int count, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;

	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
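	/* The hardware SIZE field is in units of 64-bit words, minus one. */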
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
			      (sg_dma_len(&data->sg[0]) / 8) - 1);

	addr = sg_dma_address(&data->sg[0]);
	if (!host->big_dma_addr)
		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	pr_debug("[%s] sg_dma_len: %u total sg_elem: %d\n",
		 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

	if (host->big_dma_addr)
		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
	return addr;
}

static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	return prepare_dma_single(host, data);
}

static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	u64 emm_dma;

	emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		  FIELD_PREP(MIO_EMM_DMA_SECTOR,
			     (mrq->data->blksz == 512) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_RW,
			     (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
		  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
	set_bus_id(&emm_dma, slot->bus_id);

	if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
	    (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
		emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

	pr_debug("[%s] blocks: %u multi: %d\n",
		 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
		 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
	return emm_dma;
}

static void cvm_mmc_dma_request(struct mmc_host *mmc,
				struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_data *data;
	u64 emm_dma, addr;

	if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
	    !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
		dev_err(&mmc->card->dev,
			"Error: cvm_mmc_dma_request no data\n");
		goto error;
	}

	cvm_mmc_switch_to(slot);

	data = mrq->data;
	pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
		 data->blocks, data->blksz, data->blocks * data->blksz);
	if (data->timeout_ns)
		set_wdog(slot, data->timeout_ns);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	emm_dma = prepare_ext_dma(mmc, mrq);
	addr = prepare_dma(host, data);
	if (!addr) {
		dev_err(host->dev, "prepare_dma failed\n");
		goto error;
	}

	host->dma_active = true;
	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
			 MIO_EMM_INT_DMA_ERR);

	if (host->dmar_fixup)
		host->dmar_fixup(host, mrq->cmd, data, addr);

	/*
	 * If we have a valid SD card in the slot, we set the response
	 * bit mask to check for CRC errors and timeouts only.
	 * Otherwise, use the default power reset value.
	 */
	if (mmc->card && mmc_card_sd(mmc->card))
		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
	else
		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
	return;

error:
	mrq->cmd->error = -EINVAL;
	if (mrq->done)
		mrq->done(mrq);
	host->release_bus(host);
}

static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}

static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
	struct sg_mapping_iter *smi = &host->smi;
	unsigned int bytes_xfered;
	int shift = 56;
	u64 dat = 0;

	/* Copy data to the xmit buffer before issuing the command. */
	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

	/* Auto inc from offset zero, dbuf zero */
	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

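	/*
	 * Bytes are packed most-significant first into 64-bit buffer writes,
	 * mirroring the unpacking done in do_read().
	 */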
	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}

		if (shift < 0) {
			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
			dat = 0;
		}
	}
	sg_miter_stop(smi);
}

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_command *cmd = mrq->cmd;
	struct cvm_mmc_cr_mods mods;
	u64 emm_cmd, rsp_sts;
	int retries = 100;

	/*
	 * Note about locking:
	 * All MMC devices share the same bus and controller. Allow only a
	 * single user of the bootbus/MMC bus at a time. The lock is acquired
	 * on all entry points from the MMC layer.
	 *
	 * For requests the lock is only released after the completion
	 * interrupt!
	 */
	host->acquire_bus(host);

	if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
	    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		return cvm_mmc_dma_request(mmc, mrq);

	cvm_mmc_switch_to(slot);

	mods = cvm_mmc_get_cr_mods(cmd);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_READ)
			do_read_request(host, mrq);
		else
			do_write_request(host, mrq);

		if (cmd->data->timeout_ns)
			set_wdog(slot, cmd->data->timeout_ns);
	} else
		set_wdog(slot, 0);

	host->dma_active = false;
	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
		  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
	set_bus_id(&emm_cmd, slot->bus_id);
	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
				64 - ((cmd->data->blocks * cmd->data->blksz) / 8));

	writeq(0, host->base + MIO_EMM_STS_MASK(host));

retry:
	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
		udelay(10);
		if (--retries)
			goto retry;
	}
	if (!retries)
		dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}

static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	int clk_period = 0, power_class = 10, bus_width = 0;
	u64 clock, emm_switch;

	host->acquire_bus(host);
	cvm_mmc_switch_to(slot);

	/* Set the power state */
	switch (ios->power_mode) {
	case MMC_POWER_ON:
		break;

	case MMC_POWER_OFF:
		cvm_mmc_reset_bus(slot);
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 0);
		else
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;

	case MMC_POWER_UP:
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 1);
		else
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	}

	/* Convert bus width to HW definition */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		bus_width = 2;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = 1;
		break;
	case MMC_BUS_WIDTH_1:
		bus_width = 0;
		break;
	}

	/* Change the clock frequency. */
	clock = ios->clock;
	if (clock > 52000000)
		clock = 52000000;
	slot->clock = clock;

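	/*
	 * CLK_HI and CLK_LO each get half the divider, so the bus clock is
	 * sys_freq / (2 * clk_period); rounding the divider up keeps the
	 * clock at or below the requested rate.
	 */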
	if (clock)
		clk_period = (host->sys_freq + clock - 1) / (2 * clock);

	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
				(ios->timing == MMC_TIMING_MMC_HS)) |
		     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
		     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
	set_bus_id(&emm_switch, slot->bus_id);

	if (!switch_val_changed(slot, emm_switch))
		goto out;

	set_wdog(slot, 0);
	do_switch(host, emm_switch);
	slot->cached_switch = emm_switch;
out:
	host->release_bus(host);
}

static const struct mmc_host_ops cvm_mmc_ops = {
	.request = cvm_mmc_request,
	.set_ios = cvm_mmc_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmc_gpio_get_cd,
};

static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
	struct mmc_host *mmc = slot->mmc;

	clock = min(clock, mmc->f_max);
	clock = max(clock, mmc->f_min);
	slot->clock = clock;
}

static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch;

	/* Enable this bus slot. */
	host->emm_cfg |= (1ull << slot->bus_id);
	writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
	udelay(10);

	/* Program initial clock speed and power. */
	cvm_mmc_set_clock(slot, slot->mmc->f_min);
	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
				 (host->sys_freq / slot->clock) / 2);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
				 (host->sys_freq / slot->clock) / 2);

	/* Make the changes take effect on this bus slot. */
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	slot->cached_switch = emm_switch;

	/*
	 * Set watchdog timeout value and default reset value
	 * for the mask register. Finally, set the CARD_RCA
	 * bit so that we can get the card address relative
	 * to the CMD register for CMD7 transactions.
	 */
	set_wdog(slot, 0);
	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(1, host->base + MIO_EMM_RCA(host));
	return 0;
}

static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
	u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
	struct device_node *node = dev->of_node;
	struct mmc_host *mmc = slot->mmc;
	u64 clock_period;
	int ret;

	ret = of_property_read_u32(node, "reg", &id);
	if (ret) {
		dev_err(dev, "Missing or invalid reg property on %s\n",
			of_node_full_name(node));
		return ret;
	}

	if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
		dev_err(dev, "Invalid reg property on %s\n",
			of_node_full_name(node));
		return -EINVAL;
	}

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		/*
		 * Legacy Octeon firmware has no regulator entry; fall back to
		 * a hard-coded voltage to get a sane OCR.
		 */
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
	}

	/* Common MMC bindings */
	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;

	/* Set bus width */
	if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
		of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
		if (bus_width == 8)
			mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
		else if (bus_width == 4)
			mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	/* Set maximum and minimum frequency */
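	/*
	 * mmc_of_parse() above fills in f_max from the common "max-frequency"
	 * property; if that is absent, fall back to the "spi-max-frequency"
	 * property used by older Cavium device trees.
	 */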
	if (!mmc->f_max)
		of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
	if (!mmc->f_max || mmc->f_max > 52000000)
		mmc->f_max = 52000000;
	mmc->f_min = 400000;

	/* Sampling register settings, period in picoseconds */
	clock_period = 1000000000000ull / slot->host->sys_freq;
	of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
	of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
	slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
	slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

	return id;
}

int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
	struct cvm_mmc_slot *slot;
	struct mmc_host *mmc;
	int ret, id;

	mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->mmc = mmc;
	slot->host = host;

	ret = cvm_mmc_of_parse(dev, slot);
	if (ret < 0)
		goto error;
	id = ret;

	/* Set up host parameters */
	mmc->ops = &cvm_mmc_ops;

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD;

	mmc->max_segs = 1;

	/* DMA size field can address up to 8 MB */
	mmc->max_seg_size = 8 * 1024 * 1024;
	mmc->max_req_size = mmc->max_seg_size;
	/* External DMA is in 512 byte blocks */
	mmc->max_blk_size = 512;
	/* DMA block count field is 15 bits */
	mmc->max_blk_count = 32767;

	slot->clock = mmc->f_min;
	slot->bus_id = id;
	slot->cached_rca = 1;

	host->acquire_bus(host);
	host->slot[id] = slot;
	cvm_mmc_switch_to(slot);
	cvm_mmc_init_lowlevel(slot);
	host->release_bus(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(dev, "mmc_add_host() returned %d\n", ret);
		slot->host->slot[id] = NULL;
		goto error;
	}
	return 0;

error:
	mmc_free_host(slot->mmc);
	return ret;
}

int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
	mmc_remove_host(slot->mmc);
	slot->host->slot[slot->bus_id] = NULL;
	mmc_free_host(slot->mmc);
	return 0;
}