drivers/mfd/rtsx_pcr.c
1 /* Driver for Realtek PCI-Express card reader
2 *
3 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2, or (at your option) any
8 * later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, see <http://www.gnu.org/licenses/>.
17 *
18 * Author:
19 * Wei WANG <wei_wang@realsil.com.cn>
20 */
21
22 #include <linux/pci.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/highmem.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/idr.h>
30 #include <linux/platform_device.h>
31 #include <linux/mfd/core.h>
32 #include <linux/mfd/rtsx_pci.h>
33 #include <linux/mmc/card.h>
34 #include <asm/unaligned.h>
35
36 #include "rtsx_pcr.h"
37
38 static bool msi_en = true;
39 module_param(msi_en, bool, S_IRUGO | S_IWUSR);
40 MODULE_PARM_DESC(msi_en, "Enable MSI");
41
42 static DEFINE_IDR(rtsx_pci_idr);
43 static DEFINE_SPINLOCK(rtsx_pci_lock);
44
45 static struct mfd_cell rtsx_pcr_cells[] = {
46 [RTSX_SD_CARD] = {
47 .name = DRV_NAME_RTSX_PCI_SDMMC,
48 },
49 [RTSX_MS_CARD] = {
50 .name = DRV_NAME_RTSX_PCI_MS,
51 },
52 };
53
54 static const struct pci_device_id rtsx_pci_ids[] = {
55 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
58 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
59 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
60 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
61 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
62 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
63 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
64 { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
65 { 0, }
66 };
67
68 MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
69
70 static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
71 {
72 rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
73 0xFC, pcr->aspm_en);
74 }
75
76 static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
77 {
78 rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
79 0xFC, 0);
80 }
81
82 int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
83 {
84 rtsx_pci_write_register(pcr, MSGTXDATA0,
85 MASK_8_BIT_DEF, (u8) (latency & 0xFF));
86 rtsx_pci_write_register(pcr, MSGTXDATA1,
87 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
88 rtsx_pci_write_register(pcr, MSGTXDATA2,
89 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
90 rtsx_pci_write_register(pcr, MSGTXDATA3,
91 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
92 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
93 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
94
95 return 0;
96 }
97
98 int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
99 {
100 if (pcr->ops->set_ltr_latency)
101 return pcr->ops->set_ltr_latency(pcr, latency);
102 else
103 return rtsx_comm_set_ltr_latency(pcr, latency);
104 }
105
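/* DEV_ASPM_DYNAMIC toggles ASPM through the PCIe link control register;
 * DEV_ASPM_BACKDOOR forces the setting through the vendor ASPM_FORCE_CTL
 * register instead.
 */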
106 static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
107 {
108 struct rtsx_cr_option *option = &pcr->option;
109
110 if (pcr->aspm_enabled == enable)
111 return;
112
113 if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
114 if (enable)
115 rtsx_pci_enable_aspm(pcr);
116 else
117 rtsx_pci_disable_aspm(pcr);
118 } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
119 u8 mask = FORCE_ASPM_VAL_MASK;
120 u8 val = 0;
121
122 if (enable)
123 val = pcr->aspm_en;
124 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
125 }
126
127 pcr->aspm_enabled = enable;
128 }
129
130 static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
131 {
132 if (pcr->ops->set_aspm)
133 pcr->ops->set_aspm(pcr, false);
134 else
135 rtsx_comm_set_aspm(pcr, false);
136 }
137
138 int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
139 {
140 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
141
142 return 0;
143 }
144
145 void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
146 {
147 if (pcr->ops->set_l1off_cfg_sub_d0)
148 pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
149 }
150
151 static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
152 {
153 struct rtsx_cr_option *option = &pcr->option;
154
155 rtsx_disable_aspm(pcr);
156
157 if (option->ltr_enabled)
158 rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
159
160 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
161 rtsx_set_l1off_sub_cfg_d0(pcr, 1);
162 }
163
164 void rtsx_pm_full_on(struct rtsx_pcr *pcr)
165 {
166 if (pcr->ops->full_on)
167 pcr->ops->full_on(pcr);
168 else
169 rtsx_comm_pm_full_on(pcr);
170 }
171
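/* Mark the reader busy and (re)arm the idle timer; rtsx_pci_idle_work()
 * drops back into power-saving mode 200ms after the last call.
 */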
172 void rtsx_pci_start_run(struct rtsx_pcr *pcr)
173 {
174 /* If the PCI device has been removed, don't queue idle work any more */
175 if (pcr->remove_pci)
176 return;
177
178 if (pcr->state != PDEV_STAT_RUN) {
179 pcr->state = PDEV_STAT_RUN;
180 if (pcr->ops->enable_auto_blink)
181 pcr->ops->enable_auto_blink(pcr);
182 rtsx_pm_full_on(pcr);
183 }
184
185 mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
186 }
187 EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
188
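/* Internal chip registers are accessed through the HAIMR window: a single
 * 32-bit write encodes register address, mask and data, and the controller
 * clears HAIMR_TRANS_END once the access has completed.
 */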
189 int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
190 {
191 int i;
192 u32 val = HAIMR_WRITE_START;
193
194 val |= (u32)(addr & 0x3FFF) << 16;
195 val |= (u32)mask << 8;
196 val |= (u32)data;
197
198 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
199
200 for (i = 0; i < MAX_RW_REG_CNT; i++) {
201 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
202 if ((val & HAIMR_TRANS_END) == 0) {
203 if (data != (u8)val)
204 return -EIO;
205 return 0;
206 }
207 }
208
209 return -ETIMEDOUT;
210 }
211 EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
212
213 int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
214 {
215 u32 val = HAIMR_READ_START;
216 int i;
217
218 val |= (u32)(addr & 0x3FFF) << 16;
219 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
220
221 for (i = 0; i < MAX_RW_REG_CNT; i++) {
222 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
223 if ((val & HAIMR_TRANS_END) == 0)
224 break;
225 }
226
227 if (i >= MAX_RW_REG_CNT)
228 return -ETIMEDOUT;
229
230 if (data)
231 *data = (u8)(val & 0xFF);
232
233 return 0;
234 }
235 EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
236
237 int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
238 {
239 int err, i, finished = 0;
240 u8 tmp;
241
242 rtsx_pci_init_cmd(pcr);
243
244 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
245 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
246 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
247 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
248
249 err = rtsx_pci_send_cmd(pcr, 100);
250 if (err < 0)
251 return err;
252
253 for (i = 0; i < 100000; i++) {
254 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
255 if (err < 0)
256 return err;
257
258 if (!(tmp & 0x80)) {
259 finished = 1;
260 break;
261 }
262 }
263
264 if (!finished)
265 return -ETIMEDOUT;
266
267 return 0;
268 }
269
270 int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
271 {
272 if (pcr->ops->write_phy)
273 return pcr->ops->write_phy(pcr, addr, val);
274
275 return __rtsx_pci_write_phy_register(pcr, addr, val);
276 }
277 EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
278
279 int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
280 {
281 int err, i, finished = 0;
282 u16 data;
283 u8 *ptr, tmp;
284
285 rtsx_pci_init_cmd(pcr);
286
287 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
288 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
289
290 err = rtsx_pci_send_cmd(pcr, 100);
291 if (err < 0)
292 return err;
293
294 for (i = 0; i < 100000; i++) {
295 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
296 if (err < 0)
297 return err;
298
299 if (!(tmp & 0x80)) {
300 finished = 1;
301 break;
302 }
303 }
304
305 if (!finished)
306 return -ETIMEDOUT;
307
308 rtsx_pci_init_cmd(pcr);
309
310 rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
311 rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
312
313 err = rtsx_pci_send_cmd(pcr, 100);
314 if (err < 0)
315 return err;
316
317 ptr = rtsx_pci_get_cmd_data(pcr);
318 data = ((u16)ptr[1] << 8) | ptr[0];
319
320 if (val)
321 *val = data;
322
323 return 0;
324 }
325
326 int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
327 {
328 if (pcr->ops->read_phy)
329 return pcr->ops->read_phy(pcr, addr, val);
330
331 return __rtsx_pci_read_phy_register(pcr, addr, val);
332 }
333 EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
334
335 void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
336 {
337 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
338 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
339
340 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
341 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
342 }
343 EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
344
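/* Queue one command in the host command buffer. Each entry is a little-endian
 * 32-bit word: bits 31:30 = command type, 29:16 = register address,
 * 15:8 = mask, 7:0 = data. At most HOST_CMDS_BUF_LEN / 4 entries fit;
 * further commands are silently dropped.
 */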
345 void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
346 u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
347 {
348 unsigned long flags;
349 u32 val = 0;
350 u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
351
352 val |= (u32)(cmd_type & 0x03) << 30;
353 val |= (u32)(reg_addr & 0x3FFF) << 16;
354 val |= (u32)mask << 8;
355 val |= (u32)data;
356
357 spin_lock_irqsave(&pcr->lock, flags);
358 ptr += pcr->ci;
359 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
360 put_unaligned_le32(val, ptr);
361 ptr++;
362 pcr->ci++;
363 }
364 spin_unlock_irqrestore(&pcr->lock, flags);
365 }
366 EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
367
368 void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
369 {
370 u32 val = 1 << 31;
371
372 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
373
374 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
375 /* Hardware Auto Response */
376 val |= 0x40000000;
377 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
378 }
379 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
380
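/* Kick off the queued command buffer and wait for TRANS_OK_INT. Illustrative
 * usage only, mirroring callers elsewhere in this file:
 *
 *	rtsx_pci_init_cmd(pcr);
 *	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
 *	rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_CFG1, 0, 0);
 *	err = rtsx_pci_send_cmd(pcr, 100);
 *	if (err < 0)
 *		return err;
 *	ptr = rtsx_pci_get_cmd_data(pcr);
 */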
381 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
382 {
383 struct completion trans_done;
384 u32 val = 1 << 31;
385 long timeleft;
386 unsigned long flags;
387 int err = 0;
388
389 spin_lock_irqsave(&pcr->lock, flags);
390
391 /* set up data structures for the wakeup system */
392 pcr->done = &trans_done;
393 pcr->trans_result = TRANS_NOT_READY;
394 init_completion(&trans_done);
395
396 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
397
398 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
399 /* Hardware Auto Response */
400 val |= 0x40000000;
401 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
402
403 spin_unlock_irqrestore(&pcr->lock, flags);
404
405 /* Wait for TRANS_OK_INT */
406 timeleft = wait_for_completion_interruptible_timeout(
407 &trans_done, msecs_to_jiffies(timeout));
408 if (timeleft <= 0) {
409 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
410 err = -ETIMEDOUT;
411 goto finish_send_cmd;
412 }
413
414 spin_lock_irqsave(&pcr->lock, flags);
415 if (pcr->trans_result == TRANS_RESULT_FAIL)
416 err = -EINVAL;
417 else if (pcr->trans_result == TRANS_RESULT_OK)
418 err = 0;
419 else if (pcr->trans_result == TRANS_NO_DEVICE)
420 err = -ENODEV;
421 spin_unlock_irqrestore(&pcr->lock, flags);
422
423 finish_send_cmd:
424 spin_lock_irqsave(&pcr->lock, flags);
425 pcr->done = NULL;
426 spin_unlock_irqrestore(&pcr->lock, flags);
427
428 if ((err < 0) && (err != -ENODEV))
429 rtsx_pci_stop_cmd(pcr);
430
431 if (pcr->finish_me)
432 complete(pcr->finish_me);
433
434 return err;
435 }
436 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
437
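/* Append one entry to the ADMA scatter-gather table. Each descriptor is a
 * little-endian 64-bit word: DMA address in the upper 32 bits, transfer
 * length in bits 31:12, and the SG_VALID/SG_TRANS_DATA/SG_END option flags
 * in the low bits.
 */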
438 static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
439 dma_addr_t addr, unsigned int len, int end)
440 {
441 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
442 u64 val;
443 u8 option = SG_VALID | SG_TRANS_DATA;
444
445 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
446
447 if (end)
448 option |= SG_END;
449 val = ((u64)addr << 32) | ((u64)len << 12) | option;
450
451 put_unaligned_le64(val, ptr);
452 pcr->sgi++;
453 }
454
455 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
456 int num_sg, bool read, int timeout)
457 {
458 int err = 0, count;
459
460 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
461 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
462 if (count < 1)
463 return -EINVAL;
464 pcr_dbg(pcr, "DMA mapping count: %d\n", count);
465
466 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
467
468 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
469
470 return err;
471 }
472 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
473
474 int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
475 int num_sg, bool read)
476 {
477 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
478
479 if (pcr->remove_pci)
480 return -EINVAL;
481
482 if ((sglist == NULL) || (num_sg <= 0))
483 return -EINVAL;
484
485 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
486 }
487 EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
488
489 void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
490 int num_sg, bool read)
491 {
492 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
493
494 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
495 }
496 EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
497
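/* Program the scatter-gather table into HDBAR, trigger ADMA through HDBCTLR
 * and wait for completion. DMA failures bump dma_error_count, which lets
 * rtsx_pci_switch_clock() back off the SDR104 card clock on RTS5227.
 */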
498 int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
499 int count, bool read, int timeout)
500 {
501 struct completion trans_done;
502 struct scatterlist *sg;
503 dma_addr_t addr;
504 long timeleft;
505 unsigned long flags;
506 unsigned int len;
507 int i, err = 0;
508 u32 val;
509 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
510
511 if (pcr->remove_pci)
512 return -ENODEV;
513
514 if ((sglist == NULL) || (count < 1))
515 return -EINVAL;
516
517 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
518 pcr->sgi = 0;
519 for_each_sg(sglist, sg, count, i) {
520 addr = sg_dma_address(sg);
521 len = sg_dma_len(sg);
522 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
523 }
524
525 spin_lock_irqsave(&pcr->lock, flags);
526
527 pcr->done = &trans_done;
528 pcr->trans_result = TRANS_NOT_READY;
529 init_completion(&trans_done);
530 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
531 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
532
533 spin_unlock_irqrestore(&pcr->lock, flags);
534
535 timeleft = wait_for_completion_interruptible_timeout(
536 &trans_done, msecs_to_jiffies(timeout));
537 if (timeleft <= 0) {
538 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
539 err = -ETIMEDOUT;
540 goto out;
541 }
542
543 spin_lock_irqsave(&pcr->lock, flags);
544 if (pcr->trans_result == TRANS_RESULT_FAIL) {
545 err = -EILSEQ;
546 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
547 pcr->dma_error_count++;
548 }
549
550 else if (pcr->trans_result == TRANS_NO_DEVICE)
551 err = -ENODEV;
552 spin_unlock_irqrestore(&pcr->lock, flags);
553
554 out:
555 spin_lock_irqsave(&pcr->lock, flags);
556 pcr->done = NULL;
557 spin_unlock_irqrestore(&pcr->lock, flags);
558
559 if ((err < 0) && (err != -ENODEV))
560 rtsx_pci_stop_cmd(pcr);
561
562 if (pcr->finish_me)
563 complete(pcr->finish_me);
564
565 return err;
566 }
567 EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
568
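/* Read back up to 512 bytes of the ping-pong buffer (PPBUF) through the host
 * command queue, 256 register reads per batch.
 */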
569 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
570 {
571 int err;
572 int i, j;
573 u16 reg;
574 u8 *ptr;
575
576 if (buf_len > 512)
577 buf_len = 512;
578
579 ptr = buf;
580 reg = PPBUF_BASE2;
581 for (i = 0; i < buf_len / 256; i++) {
582 rtsx_pci_init_cmd(pcr);
583
584 for (j = 0; j < 256; j++)
585 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
586
587 err = rtsx_pci_send_cmd(pcr, 250);
588 if (err < 0)
589 return err;
590
591 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
592 ptr += 256;
593 }
594
595 if (buf_len % 256) {
596 rtsx_pci_init_cmd(pcr);
597
598 for (j = 0; j < buf_len % 256; j++)
599 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
600
601 err = rtsx_pci_send_cmd(pcr, 250);
602 if (err < 0)
603 return err;
604 }
605
606 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
607
608 return 0;
609 }
610 EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
611
612 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
613 {
614 int err;
615 int i, j;
616 u16 reg;
617 u8 *ptr;
618
619 if (buf_len > 512)
620 buf_len = 512;
621
622 ptr = buf;
623 reg = PPBUF_BASE2;
624 for (i = 0; i < buf_len / 256; i++) {
625 rtsx_pci_init_cmd(pcr);
626
627 for (j = 0; j < 256; j++) {
628 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
629 reg++, 0xFF, *ptr);
630 ptr++;
631 }
632
633 err = rtsx_pci_send_cmd(pcr, 250);
634 if (err < 0)
635 return err;
636 }
637
638 if (buf_len % 256) {
639 rtsx_pci_init_cmd(pcr);
640
641 for (j = 0; j < buf_len % 256; j++) {
642 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
643 reg++, 0xFF, *ptr);
644 ptr++;
645 }
646
647 err = rtsx_pci_send_cmd(pcr, 250);
648 if (err < 0)
649 return err;
650 }
651
652 return 0;
653 }
654 EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
655
656 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
657 {
658 rtsx_pci_init_cmd(pcr);
659
660 while (*tbl & 0xFFFF0000) {
661 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
662 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
663 tbl++;
664 }
665
666 return rtsx_pci_send_cmd(pcr, 100);
667 }
668
669 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
670 {
671 const u32 *tbl;
672
673 if (card == RTSX_SD_CARD)
674 tbl = pcr->sd_pull_ctl_enable_tbl;
675 else if (card == RTSX_MS_CARD)
676 tbl = pcr->ms_pull_ctl_enable_tbl;
677 else
678 return -EINVAL;
679
680 return rtsx_pci_set_pull_ctl(pcr, tbl);
681 }
682 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
683
684 int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
685 {
686 const u32 *tbl;
687
688 if (card == RTSX_SD_CARD)
689 tbl = pcr->sd_pull_ctl_disable_tbl;
690 else if (card == RTSX_MS_CARD)
691 tbl = pcr->ms_pull_ctl_disable_tbl;
692 else
693 return -EINVAL;
694
696 return rtsx_pci_set_pull_ctl(pcr, tbl);
697 }
698 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
699
700 static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
701 {
702 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;
703
704 if (pcr->num_slots > 1)
705 pcr->bier |= MS_INT_EN;
706
707 /* Enable Bus Interrupt */
708 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
709
710 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
711 }
712
713 static inline u8 double_ssc_depth(u8 depth)
714 {
715 return ((depth > 1) ? (depth - 1) : depth);
716 }
717
718 static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
719 {
720 if (div > CLK_DIV_1) {
721 if (ssc_depth > (div - 1))
722 ssc_depth -= (div - 1);
723 else
724 ssc_depth = SSC_DEPTH_4M;
725 }
726
727 return ssc_depth;
728 }
729
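/* Select the card clock. The internal SSC clock (in MHz) maps to divider N as
 * clk = N + 2 unless the chip supplies ops->conv_clk_and_div_n; if N would
 * fall below MIN_DIV_N_PCR, the SSC clock is doubled and one of the CLK_DIV_*
 * post-dividers is used instead, with the SSC depth revised to match.
 */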
730 int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
731 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
732 {
733 int err, clk;
734 u8 n, clk_divider, mcu_cnt, div;
735 static const u8 depth[] = {
736 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
737 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
738 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
739 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
740 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
741 };
742
743 if (initial_mode) {
744 /* Use a card clock of around 250 kHz (30 MHz / 128) in the initial stage */
745 clk_divider = SD_CLK_DIVIDE_128;
746 card_clock = 30000000;
747 } else {
748 clk_divider = SD_CLK_DIVIDE_0;
749 }
750 err = rtsx_pci_write_register(pcr, SD_CFG1,
751 SD_CLK_DIVIDE_MASK, clk_divider);
752 if (err < 0)
753 return err;
754
755 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
756 if (card_clock == UHS_SDR104_MAX_DTR &&
757 pcr->dma_error_count &&
758 PCI_PID(pcr) == RTS5227_DEVICE_ID)
759 card_clock = UHS_SDR104_MAX_DTR -
760 (pcr->dma_error_count * 20000000);
761
762 card_clock /= 1000000;
763 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
764
765 clk = card_clock;
766 if (!initial_mode && double_clk)
767 clk = card_clock * 2;
768 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
769 clk, pcr->cur_clock);
770
771 if (clk == pcr->cur_clock)
772 return 0;
773
774 if (pcr->ops->conv_clk_and_div_n)
775 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
776 else
777 n = (u8)(clk - 2);
778 if ((clk <= 2) || (n > MAX_DIV_N_PCR))
779 return -EINVAL;
780
781 mcu_cnt = (u8)(125/clk + 3);
782 if (mcu_cnt > 15)
783 mcu_cnt = 15;
784
785 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
786 div = CLK_DIV_1;
787 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
788 if (pcr->ops->conv_clk_and_div_n) {
789 int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
790 DIV_N_TO_CLK) * 2;
791 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
792 CLK_TO_DIV_N);
793 } else {
794 n = (n + 2) * 2 - 2;
795 }
796 div++;
797 }
798 pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
799
800 ssc_depth = depth[ssc_depth];
801 if (double_clk)
802 ssc_depth = double_ssc_depth(ssc_depth);
803
804 ssc_depth = revise_ssc_depth(ssc_depth, div);
805 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
806
807 rtsx_pci_init_cmd(pcr);
808 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
809 CLK_LOW_FREQ, CLK_LOW_FREQ);
810 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
811 0xFF, (div << 4) | mcu_cnt);
812 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
813 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
814 SSC_DEPTH_MASK, ssc_depth);
815 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
816 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
817 if (vpclk) {
818 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
819 PHASE_NOT_RESET, 0);
820 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
821 PHASE_NOT_RESET, PHASE_NOT_RESET);
822 }
823
824 err = rtsx_pci_send_cmd(pcr, 2000);
825 if (err < 0)
826 return err;
827
828 /* Wait for the SSC clock to stabilize */
829 udelay(10);
830 err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
831 if (err < 0)
832 return err;
833
834 pcr->cur_clock = clk;
835 return 0;
836 }
837 EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
838
839 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
840 {
841 if (pcr->ops->card_power_on)
842 return pcr->ops->card_power_on(pcr, card);
843
844 return 0;
845 }
846 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
847
848 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
849 {
850 if (pcr->ops->card_power_off)
851 return pcr->ops->card_power_off(pcr, card);
852
853 return 0;
854 }
855 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
856
857 int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
858 {
859 static const unsigned int cd_mask[] = {
860 [RTSX_SD_CARD] = SD_EXIST,
861 [RTSX_MS_CARD] = MS_EXIST
862 };
863
864 if (!(pcr->flags & PCR_MS_PMOS)) {
865 /* With a single shared PMOS, the card may not be accessed
866 * unless the card currently present is the designated one.
867 */
868 if (pcr->card_exist & (~cd_mask[card]))
869 return -EIO;
870 }
871
872 return 0;
873 }
874 EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
875
876 int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
877 {
878 if (pcr->ops->switch_output_voltage)
879 return pcr->ops->switch_output_voltage(pcr, voltage);
880
881 return 0;
882 }
883 EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
884
885 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
886 {
887 unsigned int val;
888
889 val = rtsx_pci_readl(pcr, RTSX_BIPR);
890 if (pcr->ops->cd_deglitch)
891 val = pcr->ops->cd_deglitch(pcr);
892
893 return val;
894 }
895 EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
896
897 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
898 {
899 struct completion finish;
900
901 pcr->finish_me = &finish;
902 init_completion(&finish);
903
904 if (pcr->done)
905 complete(pcr->done);
906
907 if (!pcr->remove_pci)
908 rtsx_pci_stop_cmd(pcr);
909
910 wait_for_completion_interruptible_timeout(&finish,
911 msecs_to_jiffies(2));
912 pcr->finish_me = NULL;
913 }
914 EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
915
916 static void rtsx_pci_card_detect(struct work_struct *work)
917 {
918 struct delayed_work *dwork;
919 struct rtsx_pcr *pcr;
920 unsigned long flags;
921 unsigned int card_detect = 0, card_inserted, card_removed;
922 u32 irq_status;
923
924 dwork = to_delayed_work(work);
925 pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
926
927 pcr_dbg(pcr, "--> %s\n", __func__);
928
929 mutex_lock(&pcr->pcr_mutex);
930 spin_lock_irqsave(&pcr->lock, flags);
931
932 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
933 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
934
935 irq_status &= CARD_EXIST;
936 card_inserted = pcr->card_inserted & irq_status;
937 card_removed = pcr->card_removed;
938 pcr->card_inserted = 0;
939 pcr->card_removed = 0;
940
941 spin_unlock_irqrestore(&pcr->lock, flags);
942
943 if (card_inserted || card_removed) {
944 pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
945 card_inserted, card_removed);
946
947 if (pcr->ops->cd_deglitch)
948 card_inserted = pcr->ops->cd_deglitch(pcr);
949
950 card_detect = card_inserted | card_removed;
951
952 pcr->card_exist |= card_inserted;
953 pcr->card_exist &= ~card_removed;
954 }
955
956 mutex_unlock(&pcr->pcr_mutex);
957
958 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
959 pcr->slots[RTSX_SD_CARD].card_event(
960 pcr->slots[RTSX_SD_CARD].p_dev);
961 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
962 pcr->slots[RTSX_MS_CARD].card_event(
963 pcr->slots[RTSX_MS_CARD].p_dev);
964 }
965
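/* Interrupt handler: read and clear RTSX_BIPR, latch card insertion/removal
 * bits, complete any pending command or DMA transfer, and defer card-detect
 * handling to carddet_work.
 */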
966 static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
967 {
968 struct rtsx_pcr *pcr = dev_id;
969 u32 int_reg;
970
971 if (!pcr)
972 return IRQ_NONE;
973
974 spin_lock(&pcr->lock);
975
976 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
977 /* Clear interrupt flag */
978 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
979 if ((int_reg & pcr->bier) == 0) {
980 spin_unlock(&pcr->lock);
981 return IRQ_NONE;
982 }
983 if (int_reg == 0xFFFFFFFF) {
984 spin_unlock(&pcr->lock);
985 return IRQ_HANDLED;
986 }
987
988 int_reg &= (pcr->bier | 0x7FFFFF);
989
990 if (int_reg & SD_INT) {
991 if (int_reg & SD_EXIST) {
992 pcr->card_inserted |= SD_EXIST;
993 } else {
994 pcr->card_removed |= SD_EXIST;
995 pcr->card_inserted &= ~SD_EXIST;
996 }
997 pcr->dma_error_count = 0;
998 }
999
1000 if (int_reg & MS_INT) {
1001 if (int_reg & MS_EXIST) {
1002 pcr->card_inserted |= MS_EXIST;
1003 } else {
1004 pcr->card_removed |= MS_EXIST;
1005 pcr->card_inserted &= ~MS_EXIST;
1006 }
1007 }
1008
1009 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1010 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1011 pcr->trans_result = TRANS_RESULT_FAIL;
1012 if (pcr->done)
1013 complete(pcr->done);
1014 } else if (int_reg & TRANS_OK_INT) {
1015 pcr->trans_result = TRANS_RESULT_OK;
1016 if (pcr->done)
1017 complete(pcr->done);
1018 }
1019 }
1020
1021 if (pcr->card_inserted || pcr->card_removed)
1022 schedule_delayed_work(&pcr->carddet_work,
1023 msecs_to_jiffies(200));
1024
1025 spin_unlock(&pcr->lock);
1026 return IRQ_HANDLED;
1027 }
1028
1029 static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1030 {
1031 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1032 __func__, pcr->msi_en, pcr->pci->irq);
1033
1034 if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1035 pcr->msi_en ? 0 : IRQF_SHARED,
1036 DRV_NAME_RTSX_PCI, pcr)) {
1037 dev_err(&(pcr->pci->dev),
1038 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1039 pcr->pci->irq);
1040 return -1;
1041 }
1042
1043 pcr->irq = pcr->pci->irq;
1044 pci_intx(pcr->pci, !pcr->msi_en);
1045
1046 return 0;
1047 }
1048
1049 static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1050 {
1051 if (pcr->ops->set_aspm)
1052 pcr->ops->set_aspm(pcr, true);
1053 else
1054 rtsx_comm_set_aspm(pcr, true);
1055 }
1056
1057 static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1058 {
1059 struct rtsx_cr_option *option = &pcr->option;
1060
1061 if (option->ltr_enabled) {
1062 u32 latency = option->ltr_l1off_latency;
1063
1064 if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1065 mdelay(option->l1_snooze_delay);
1066
1067 rtsx_set_ltr_latency(pcr, latency);
1068 }
1069
1070 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1071 rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1072
1073 rtsx_enable_aspm(pcr);
1074 }
1075
1076 void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1077 {
1078 if (pcr->ops->power_saving)
1079 pcr->ops->power_saving(pcr);
1080 else
1081 rtsx_comm_pm_power_saving(pcr);
1082 }
1083
1084 static void rtsx_pci_idle_work(struct work_struct *work)
1085 {
1086 struct delayed_work *dwork = to_delayed_work(work);
1087 struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
1088
1089 pcr_dbg(pcr, "--> %s\n", __func__);
1090
1091 mutex_lock(&pcr->pcr_mutex);
1092
1093 pcr->state = PDEV_STAT_IDLE;
1094
1095 if (pcr->ops->disable_auto_blink)
1096 pcr->ops->disable_auto_blink(pcr);
1097 if (pcr->ops->turn_off_led)
1098 pcr->ops->turn_off_led(pcr);
1099
1100 rtsx_pm_power_saving(pcr);
1101
1102 mutex_unlock(&pcr->pcr_mutex);
1103 }
1104
1105 #ifdef CONFIG_PM
1106 static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
1107 {
1108 if (pcr->ops->turn_off_led)
1109 pcr->ops->turn_off_led(pcr);
1110
1111 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1112 pcr->bier = 0;
1113
1114 rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1115 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1116
1117 if (pcr->ops->force_power_down)
1118 pcr->ops->force_power_down(pcr, pm_state);
1119 }
1120 #endif
1121
1122 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1123 {
1124 int err;
1125
1126 pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
1127 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1128
1129 rtsx_pci_enable_bus_int(pcr);
1130
1131 /* Power on SSC */
1132 err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1133 if (err < 0)
1134 return err;
1135
1136 /* Wait for SSC power to stabilize */
1137 udelay(200);
1138
1139 rtsx_pci_disable_aspm(pcr);
1140 if (pcr->ops->optimize_phy) {
1141 err = pcr->ops->optimize_phy(pcr);
1142 if (err < 0)
1143 return err;
1144 }
1145
1146 rtsx_pci_init_cmd(pcr);
1147
1148 /* Set mcu_cnt to 7 to ensure data can be sampled properly */
1149 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1150
1151 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1152 /* Disable card clock */
1153 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1154 /* Reset delink mode */
1155 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1156 /* Card driving select */
1157 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1158 0xFF, pcr->card_drive_sel);
1159 /* Enable SSC Clock */
1160 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1161 0xFF, SSC_8X_EN | SSC_SEL_4M);
1162 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1163 /* Disable cd_pwr_save */
1164 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1165 /* Clear Link Ready Interrupt */
1166 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1167 LINK_RDY_INT, LINK_RDY_INT);
1168 /* Enlarge the estimation window of PERST# glitch
1169 * to reduce the chance of spurious card interrupts
1170 */
1171 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1172 /* Update RC oscillator to 400k
1173 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1174 * 1: 2M 0: 400k
1175 */
1176 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1177 /* Set interrupt write clear
1178 * bit 1: U_elbi_if_rd_clr_en
1179 * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1180 * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1181 */
1182 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1183
1184 err = rtsx_pci_send_cmd(pcr, 100);
1185 if (err < 0)
1186 return err;
1187
1188 switch (PCI_PID(pcr)) {
1189 case PID_5250:
1190 case PID_524A:
1191 case PID_525A:
1192 rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1193 break;
1194 default:
1195 break;
1196 }
1197
1198 /* Enable clk_request_n to enable clock power management */
1199 rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
1200 /* Enter L1 when host tx idle */
1201 rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
1202
1203 if (pcr->ops->extra_init_hw) {
1204 err = pcr->ops->extra_init_hw(pcr);
1205 if (err < 0)
1206 return err;
1207 }
1208
1209 /* No card-detect interrupt is generated if the driver is probed with a
1210 * card already inserted, so initialize pcr->card_exist here.
1211 */
1212 if (pcr->ops->cd_deglitch)
1213 pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1214 else
1215 pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1216
1217 return 0;
1218 }
1219
1220 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1221 {
1222 int err;
1223
1224 spin_lock_init(&pcr->lock);
1225 mutex_init(&pcr->pcr_mutex);
1226
1227 switch (PCI_PID(pcr)) {
1228 default:
1229 case 0x5209:
1230 rts5209_init_params(pcr);
1231 break;
1232
1233 case 0x5229:
1234 rts5229_init_params(pcr);
1235 break;
1236
1237 case 0x5289:
1238 rtl8411_init_params(pcr);
1239 break;
1240
1241 case 0x5227:
1242 rts5227_init_params(pcr);
1243 break;
1244
1245 case 0x522A:
1246 rts522a_init_params(pcr);
1247 break;
1248
1249 case 0x5249:
1250 rts5249_init_params(pcr);
1251 break;
1252
1253 case 0x524A:
1254 rts524a_init_params(pcr);
1255 break;
1256
1257 case 0x525A:
1258 rts525a_init_params(pcr);
1259 break;
1260
1261 case 0x5287:
1262 rtl8411b_init_params(pcr);
1263 break;
1264
1265 case 0x5286:
1266 rtl8402_init_params(pcr);
1267 break;
1268 }
1269
1270 pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1271 PCI_PID(pcr), pcr->ic_version);
1272
1273 pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1274 GFP_KERNEL);
1275 if (!pcr->slots)
1276 return -ENOMEM;
1277
1278 if (pcr->ops->fetch_vendor_settings)
1279 pcr->ops->fetch_vendor_settings(pcr);
1280
1281 pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1282 pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1283 pcr->sd30_drive_sel_1v8);
1284 pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1285 pcr->sd30_drive_sel_3v3);
1286 pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1287 pcr->card_drive_sel);
1288 pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1289
1290 pcr->state = PDEV_STAT_IDLE;
1291 err = rtsx_pci_init_hw(pcr);
1292 if (err < 0) {
1293 kfree(pcr->slots);
1294 return err;
1295 }
1296
1297 return 0;
1298 }
1299
1300 static int rtsx_pci_probe(struct pci_dev *pcidev,
1301 const struct pci_device_id *id)
1302 {
1303 struct rtsx_pcr *pcr;
1304 struct pcr_handle *handle;
1305 u32 base, len;
1306 int ret, i, bar = 0;
1307
1308 dev_dbg(&(pcidev->dev),
1309 ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1310 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1311 (int)pcidev->revision);
1312
1313 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
1314 if (ret < 0)
1315 return ret;
1316
1317 ret = pci_enable_device(pcidev);
1318 if (ret)
1319 return ret;
1320
1321 ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1322 if (ret)
1323 goto disable;
1324
1325 pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1326 if (!pcr) {
1327 ret = -ENOMEM;
1328 goto release_pci;
1329 }
1330
1331 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1332 if (!handle) {
1333 ret = -ENOMEM;
1334 goto free_pcr;
1335 }
1336 handle->pcr = pcr;
1337
1338 idr_preload(GFP_KERNEL);
1339 spin_lock(&rtsx_pci_lock);
1340 ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1341 if (ret >= 0)
1342 pcr->id = ret;
1343 spin_unlock(&rtsx_pci_lock);
1344 idr_preload_end();
1345 if (ret < 0)
1346 goto free_handle;
1347
1348 pcr->pci = pcidev;
1349 dev_set_drvdata(&pcidev->dev, handle);
1350
1351 if (CHK_PCI_PID(pcr, 0x525A))
1352 bar = 1;
1353 len = pci_resource_len(pcidev, bar);
1354 base = pci_resource_start(pcidev, bar);
1355 pcr->remap_addr = ioremap_nocache(base, len);
1356 if (!pcr->remap_addr) {
1357 ret = -ENOMEM;
1358 goto free_handle;
1359 }
1360
1361 pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1362 RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1363 GFP_KERNEL);
1364 if (pcr->rtsx_resv_buf == NULL) {
1365 ret = -ENXIO;
1366 goto unmap;
1367 }
1368 pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1369 pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1370 pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1371 pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1372
1373 pcr->card_inserted = 0;
1374 pcr->card_removed = 0;
1375 INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1376 INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
1377
1378 pcr->msi_en = msi_en;
1379 if (pcr->msi_en) {
1380 ret = pci_enable_msi(pcidev);
1381 if (ret)
1382 pcr->msi_en = false;
1383 }
1384
1385 ret = rtsx_pci_acquire_irq(pcr);
1386 if (ret < 0)
1387 goto disable_msi;
1388
1389 pci_set_master(pcidev);
1390 synchronize_irq(pcr->irq);
1391
1392 ret = rtsx_pci_init_chip(pcr);
1393 if (ret < 0)
1394 goto disable_irq;
1395
1396 for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1397 rtsx_pcr_cells[i].platform_data = handle;
1398 rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1399 }
1400 ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1401 ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1402 if (ret < 0)
1403 goto disable_irq;
1404
1405 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1406
1407 return 0;
1408
1409 disable_irq:
1410 free_irq(pcr->irq, (void *)pcr);
1411 disable_msi:
1412 if (pcr->msi_en)
1413 pci_disable_msi(pcr->pci);
1414 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1415 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1416 unmap:
1417 iounmap(pcr->remap_addr);
1418 free_handle:
1419 kfree(handle);
1420 free_pcr:
1421 kfree(pcr);
1422 release_pci:
1423 pci_release_regions(pcidev);
1424 disable:
1425 pci_disable_device(pcidev);
1426
1427 return ret;
1428 }
1429
1430 static void rtsx_pci_remove(struct pci_dev *pcidev)
1431 {
1432 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1433 struct rtsx_pcr *pcr = handle->pcr;
1434
1435 pcr->remove_pci = true;
1436
1437 /* Disable interrupts at the pcr level */
1438 spin_lock_irq(&pcr->lock);
1439 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1440 pcr->bier = 0;
1441 spin_unlock_irq(&pcr->lock);
1442
1443 cancel_delayed_work_sync(&pcr->carddet_work);
1444 cancel_delayed_work_sync(&pcr->idle_work);
1445
1446 mfd_remove_devices(&pcidev->dev);
1447
1448 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1449 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1450 free_irq(pcr->irq, (void *)pcr);
1451 if (pcr->msi_en)
1452 pci_disable_msi(pcr->pci);
1453 iounmap(pcr->remap_addr);
1454
1455 pci_release_regions(pcidev);
1456 pci_disable_device(pcidev);
1457
1458 spin_lock(&rtsx_pci_lock);
1459 idr_remove(&rtsx_pci_idr, pcr->id);
1460 spin_unlock(&rtsx_pci_lock);
1461
1462 kfree(pcr->slots);
1463 kfree(pcr);
1464 kfree(handle);
1465
1466 dev_dbg(&(pcidev->dev),
1467 ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1468 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1469 }
1470
1471 #ifdef CONFIG_PM
1472
1473 static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
1474 {
1475 struct pcr_handle *handle;
1476 struct rtsx_pcr *pcr;
1477
1478 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1479
1480 handle = pci_get_drvdata(pcidev);
1481 pcr = handle->pcr;
1482
1483 cancel_delayed_work(&pcr->carddet_work);
1484 cancel_delayed_work(&pcr->idle_work);
1485
1486 mutex_lock(&pcr->pcr_mutex);
1487
1488 rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1489
1490 pci_save_state(pcidev);
1491 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
1492 pci_disable_device(pcidev);
1493 pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
1494
1495 mutex_unlock(&pcr->pcr_mutex);
1496 return 0;
1497 }
1498
1499 static int rtsx_pci_resume(struct pci_dev *pcidev)
1500 {
1501 struct pcr_handle *handle;
1502 struct rtsx_pcr *pcr;
1503 int ret = 0;
1504
1505 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1506
1507 handle = pci_get_drvdata(pcidev);
1508 pcr = handle->pcr;
1509
1510 mutex_lock(&pcr->pcr_mutex);
1511
1512 pci_set_power_state(pcidev, PCI_D0);
1513 pci_restore_state(pcidev);
1514 ret = pci_enable_device(pcidev);
1515 if (ret)
1516 goto out;
1517 pci_set_master(pcidev);
1518
1519 ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1520 if (ret)
1521 goto out;
1522
1523 ret = rtsx_pci_init_hw(pcr);
1524 if (ret)
1525 goto out;
1526
1527 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1528
1529 out:
1530 mutex_unlock(&pcr->pcr_mutex);
1531 return ret;
1532 }
1533
1534 static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1535 {
1536 struct pcr_handle *handle;
1537 struct rtsx_pcr *pcr;
1538
1539 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1540
1541 handle = pci_get_drvdata(pcidev);
1542 pcr = handle->pcr;
1543 rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1544
1545 pci_disable_device(pcidev);
1546 }
1547
1548 #else /* CONFIG_PM */
1549
1550 #define rtsx_pci_suspend NULL
1551 #define rtsx_pci_resume NULL
1552 #define rtsx_pci_shutdown NULL
1553
1554 #endif /* CONFIG_PM */
1555
1556 static struct pci_driver rtsx_pci_driver = {
1557 .name = DRV_NAME_RTSX_PCI,
1558 .id_table = rtsx_pci_ids,
1559 .probe = rtsx_pci_probe,
1560 .remove = rtsx_pci_remove,
1561 .suspend = rtsx_pci_suspend,
1562 .resume = rtsx_pci_resume,
1563 .shutdown = rtsx_pci_shutdown,
1564 };
1565 module_pci_driver(rtsx_pci_driver);
1566
1567 MODULE_LICENSE("GPL");
1568 MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1569 MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");