/*
 * Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */
24 #include <linux/blkdev.h>
25 #include <linux/kthread.h>
26 #include <linux/sched.h>
30 /***********************************************************************
31 * Scatter-gather transfer buffer access routines
32 ***********************************************************************/
35 * Copy a buffer of length buflen to/from the srb's transfer buffer.
36 * (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
37 * points to a list of s-g entries and we ignore srb->request_bufflen.
38 * For non-scatter-gather transfers, srb->request_buffer points to the
39 * transfer buffer itself and srb->request_bufflen is the buffer's length.)
40 * Update the *index and *offset variables so that the next copy will
41 * pick up from where this one left off.
44 unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer
,
46 struct scsi_cmnd
*srb
,
49 enum xfer_buf_dir dir
)
53 /* If not using scatter-gather, just transfer the data directly. */
54 if (scsi_sg_count(srb
) == 0) {
55 unsigned char *sgbuffer
;
57 if (*offset
>= scsi_bufflen(srb
))
59 cnt
= min(buflen
, scsi_bufflen(srb
) - *offset
);
61 sgbuffer
= (unsigned char *)scsi_sglist(srb
) + *offset
;
63 if (dir
== TO_XFER_BUF
)
64 memcpy(sgbuffer
, buffer
, cnt
);
66 memcpy(buffer
, sgbuffer
, cnt
);
70 * Using scatter-gather. We have to go through the list one entry
71 * at a time. Each s-g entry contains some number of pages, and
72 * each page has to be kmap()'ed separately.
75 struct scatterlist
*sg
=
76 (struct scatterlist
*)scsi_sglist(srb
)
80 * This loop handles a single s-g list entry, which may
81 * include multiple pages. Find the initial page structure
82 * and the starting offset within the page, and update
83 * the *offset and *index values for the next loop.
86 while (cnt
< buflen
&& *index
< scsi_sg_count(srb
)) {
87 struct page
*page
= sg_page(sg
) +
88 ((sg
->offset
+ *offset
) >> PAGE_SHIFT
);
89 unsigned int poff
= (sg
->offset
+ *offset
) &
91 unsigned int sglen
= sg
->length
- *offset
;
93 if (sglen
> buflen
- cnt
) {
94 /* Transfer ends within this s-g entry */
98 /* Transfer continues to next s-g entry */
105 unsigned int plen
= min(sglen
, (unsigned int)
107 unsigned char *ptr
= kmap(page
);
109 if (dir
== TO_XFER_BUF
)
110 memcpy(ptr
+ poff
, buffer
+ cnt
, plen
);
112 memcpy(buffer
+ cnt
, ptr
+ poff
, plen
);
115 /* Start at the beginning of the next page */
124 /* Return the amount actually transferred */
129 * Store the contents of buffer into srb's transfer buffer and set the
132 void rtsx_stor_set_xfer_buf(unsigned char *buffer
,
133 unsigned int buflen
, struct scsi_cmnd
*srb
)
135 unsigned int index
= 0, offset
= 0;
137 rtsx_stor_access_xfer_buf(buffer
, buflen
, srb
, &index
, &offset
,
139 if (buflen
< scsi_bufflen(srb
))
140 scsi_set_resid(srb
, scsi_bufflen(srb
) - buflen
);
143 void rtsx_stor_get_xfer_buf(unsigned char *buffer
,
144 unsigned int buflen
, struct scsi_cmnd
*srb
)
146 unsigned int index
= 0, offset
= 0;
148 rtsx_stor_access_xfer_buf(buffer
, buflen
, srb
, &index
, &offset
,
150 if (buflen
< scsi_bufflen(srb
))
151 scsi_set_resid(srb
, scsi_bufflen(srb
) - buflen
);
154 /***********************************************************************
156 ***********************************************************************/
159 * Invoke the transport and basic error-handling/recovery methods
161 * This is used to send the message to the device and receive the response.
163 void rtsx_invoke_transport(struct scsi_cmnd
*srb
, struct rtsx_chip
*chip
)
167 result
= rtsx_scsi_handler(srb
, chip
);
170 * if the command gets aborted by the higher layers, we need to
171 * short-circuit all other processing.
173 if (rtsx_chk_stat(chip
, RTSX_STAT_ABORT
)) {
174 dev_dbg(rtsx_dev(chip
), "-- command was aborted\n");
175 srb
->result
= DID_ABORT
<< 16;
179 /* if there is a transport error, reset and don't auto-sense */
180 if (result
== TRANSPORT_ERROR
) {
181 dev_dbg(rtsx_dev(chip
), "-- transport indicates error, resetting\n");
182 srb
->result
= DID_ERROR
<< 16;
186 srb
->result
= SAM_STAT_GOOD
;
189 * If we have a failure, we're going to do a REQUEST_SENSE
190 * automatically. Note that we differentiate between a command
191 * "failure" and an "error" in the transport mechanism.
193 if (result
== TRANSPORT_FAILED
) {
194 /* set the result so the higher layers expect this data */
195 srb
->result
= SAM_STAT_CHECK_CONDITION
;
196 memcpy(srb
->sense_buffer
,
197 (unsigned char *)&chip
->sense_buffer
[SCSI_LUN(srb
)],
198 sizeof(struct sense_data_t
));
207 void rtsx_add_cmd(struct rtsx_chip
*chip
,
208 u8 cmd_type
, u16 reg_addr
, u8 mask
, u8 data
)
210 u32
*cb
= (u32
*)(chip
->host_cmds_ptr
);
213 val
|= (u32
)(cmd_type
& 0x03) << 30;
214 val
|= (u32
)(reg_addr
& 0x3FFF) << 16;
215 val
|= (u32
)mask
<< 8;
218 spin_lock_irq(&chip
->rtsx
->reg_lock
);
219 if (chip
->ci
< (HOST_CMDS_BUF_LEN
/ 4))
220 cb
[(chip
->ci
)++] = cpu_to_le32(val
);
222 spin_unlock_irq(&chip
->rtsx
->reg_lock
);
225 void rtsx_send_cmd_no_wait(struct rtsx_chip
*chip
)
229 rtsx_writel(chip
, RTSX_HCBAR
, chip
->host_cmds_addr
);
231 val
|= (u32
)(chip
->ci
* 4) & 0x00FFFFFF;
232 /* Hardware Auto Response */
234 rtsx_writel(chip
, RTSX_HCBCTLR
, val
);
237 int rtsx_send_cmd(struct rtsx_chip
*chip
, u8 card
, int timeout
)
239 struct rtsx_dev
*rtsx
= chip
->rtsx
;
240 struct completion trans_done
;
246 rtsx
->check_card_cd
= SD_EXIST
;
247 else if (card
== MS_CARD
)
248 rtsx
->check_card_cd
= MS_EXIST
;
249 else if (card
== XD_CARD
)
250 rtsx
->check_card_cd
= XD_EXIST
;
252 rtsx
->check_card_cd
= 0;
254 spin_lock_irq(&rtsx
->reg_lock
);
256 /* set up data structures for the wakeup system */
257 rtsx
->done
= &trans_done
;
258 rtsx
->trans_result
= TRANS_NOT_READY
;
259 init_completion(&trans_done
);
260 rtsx
->trans_state
= STATE_TRANS_CMD
;
262 rtsx_writel(chip
, RTSX_HCBAR
, chip
->host_cmds_addr
);
264 val
|= (u32
)(chip
->ci
* 4) & 0x00FFFFFF;
265 /* Hardware Auto Response */
267 rtsx_writel(chip
, RTSX_HCBCTLR
, val
);
269 spin_unlock_irq(&rtsx
->reg_lock
);
271 /* Wait for TRANS_OK_INT */
272 timeleft
= wait_for_completion_interruptible_timeout(
273 &trans_done
, msecs_to_jiffies(timeout
));
275 dev_dbg(rtsx_dev(chip
), "chip->int_reg = 0x%x\n",
279 goto finish_send_cmd
;
282 spin_lock_irq(&rtsx
->reg_lock
);
283 if (rtsx
->trans_result
== TRANS_RESULT_FAIL
)
285 else if (rtsx
->trans_result
== TRANS_RESULT_OK
)
288 spin_unlock_irq(&rtsx
->reg_lock
);
292 rtsx
->trans_state
= STATE_TRANS_NONE
;
295 rtsx_stop_cmd(chip
, card
);
300 static inline void rtsx_add_sg_tbl(
301 struct rtsx_chip
*chip
, u32 addr
, u32 len
, u8 option
)
303 u64
*sgb
= (u64
*)(chip
->host_sg_tbl_ptr
);
311 temp_opt
= option
& (~SG_END
);
316 val
= ((u64
)addr
<< 32) | ((u64
)temp_len
<< 12) | temp_opt
;
318 if (chip
->sgi
< (HOST_SG_TBL_BUF_LEN
/ 8))
319 sgb
[(chip
->sgi
)++] = cpu_to_le64(val
);
326 static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip
*chip
, u8 card
,
327 struct scatterlist
*sg
, int num_sg
,
329 unsigned int *offset
, int size
,
330 enum dma_data_direction dma_dir
,
333 struct rtsx_dev
*rtsx
= chip
->rtsx
;
334 struct completion trans_done
;
336 int sg_cnt
, i
, resid
;
339 struct scatterlist
*sg_ptr
;
342 if (!sg
|| (num_sg
<= 0) || !offset
|| !index
)
345 if (dma_dir
== DMA_TO_DEVICE
)
346 dir
= HOST_TO_DEVICE
;
347 else if (dma_dir
== DMA_FROM_DEVICE
)
348 dir
= DEVICE_TO_HOST
;
353 rtsx
->check_card_cd
= SD_EXIST
;
354 else if (card
== MS_CARD
)
355 rtsx
->check_card_cd
= MS_EXIST
;
356 else if (card
== XD_CARD
)
357 rtsx
->check_card_cd
= XD_EXIST
;
359 rtsx
->check_card_cd
= 0;
361 spin_lock_irq(&rtsx
->reg_lock
);
363 /* set up data structures for the wakeup system */
364 rtsx
->done
= &trans_done
;
366 rtsx
->trans_state
= STATE_TRANS_SG
;
367 rtsx
->trans_result
= TRANS_NOT_READY
;
369 spin_unlock_irq(&rtsx
->reg_lock
);
371 sg_cnt
= dma_map_sg(&rtsx
->pci
->dev
, sg
, num_sg
, dma_dir
);
377 * Usually the next entry will be @sg@ + 1, but if this sg element
378 * is part of a chained scatterlist, it could jump to the start of
379 * a new scatterlist array. So here we use sg_next to move to
382 for (i
= 0; i
< *index
; i
++)
383 sg_ptr
= sg_next(sg_ptr
);
384 for (i
= *index
; i
< sg_cnt
; i
++) {
389 addr
= sg_dma_address(sg_ptr
);
390 len
= sg_dma_len(sg_ptr
);
392 dev_dbg(rtsx_dev(chip
), "DMA addr: 0x%x, Len: 0x%x\n",
393 (unsigned int)addr
, len
);
394 dev_dbg(rtsx_dev(chip
), "*index = %d, *offset = %d\n",
399 if ((len
- *offset
) > resid
) {
404 resid
-= (len
- *offset
);
409 if ((i
== (sg_cnt
- 1)) || !resid
)
410 option
= SG_VALID
| SG_END
| SG_TRANS_DATA
;
412 option
= SG_VALID
| SG_TRANS_DATA
;
414 rtsx_add_sg_tbl(chip
, (u32
)addr
, (u32
)len
, option
);
419 sg_ptr
= sg_next(sg_ptr
);
422 dev_dbg(rtsx_dev(chip
), "SG table count = %d\n", chip
->sgi
);
424 val
|= (u32
)(dir
& 0x01) << 29;
427 spin_lock_irq(&rtsx
->reg_lock
);
429 init_completion(&trans_done
);
431 rtsx_writel(chip
, RTSX_HDBAR
, chip
->host_sg_tbl_addr
);
432 rtsx_writel(chip
, RTSX_HDBCTLR
, val
);
434 spin_unlock_irq(&rtsx
->reg_lock
);
436 timeleft
= wait_for_completion_interruptible_timeout(
437 &trans_done
, msecs_to_jiffies(timeout
));
439 dev_dbg(rtsx_dev(chip
), "Timeout (%s %d)\n",
441 dev_dbg(rtsx_dev(chip
), "chip->int_reg = 0x%x\n",
447 spin_lock_irq(&rtsx
->reg_lock
);
448 if (rtsx
->trans_result
== TRANS_RESULT_FAIL
) {
450 spin_unlock_irq(&rtsx
->reg_lock
);
453 spin_unlock_irq(&rtsx
->reg_lock
);
455 /* Wait for TRANS_OK_INT */
456 spin_lock_irq(&rtsx
->reg_lock
);
457 if (rtsx
->trans_result
== TRANS_NOT_READY
) {
458 init_completion(&trans_done
);
459 spin_unlock_irq(&rtsx
->reg_lock
);
460 timeleft
= wait_for_completion_interruptible_timeout(
461 &trans_done
, msecs_to_jiffies(timeout
));
463 dev_dbg(rtsx_dev(chip
), "Timeout (%s %d)\n",
465 dev_dbg(rtsx_dev(chip
), "chip->int_reg = 0x%x\n",
471 spin_unlock_irq(&rtsx
->reg_lock
);
474 spin_lock_irq(&rtsx
->reg_lock
);
475 if (rtsx
->trans_result
== TRANS_RESULT_FAIL
)
477 else if (rtsx
->trans_result
== TRANS_RESULT_OK
)
480 spin_unlock_irq(&rtsx
->reg_lock
);
484 rtsx
->trans_state
= STATE_TRANS_NONE
;
485 dma_unmap_sg(&rtsx
->pci
->dev
, sg
, num_sg
, dma_dir
);
488 rtsx_stop_cmd(chip
, card
);
493 static int rtsx_transfer_sglist_adma(struct rtsx_chip
*chip
, u8 card
,
494 struct scatterlist
*sg
, int num_sg
,
495 enum dma_data_direction dma_dir
,
498 struct rtsx_dev
*rtsx
= chip
->rtsx
;
499 struct completion trans_done
;
504 struct scatterlist
*sg_ptr
;
506 if (!sg
|| (num_sg
<= 0))
509 if (dma_dir
== DMA_TO_DEVICE
)
510 dir
= HOST_TO_DEVICE
;
511 else if (dma_dir
== DMA_FROM_DEVICE
)
512 dir
= DEVICE_TO_HOST
;
517 rtsx
->check_card_cd
= SD_EXIST
;
518 else if (card
== MS_CARD
)
519 rtsx
->check_card_cd
= MS_EXIST
;
520 else if (card
== XD_CARD
)
521 rtsx
->check_card_cd
= XD_EXIST
;
523 rtsx
->check_card_cd
= 0;
525 spin_lock_irq(&rtsx
->reg_lock
);
527 /* set up data structures for the wakeup system */
528 rtsx
->done
= &trans_done
;
530 rtsx
->trans_state
= STATE_TRANS_SG
;
531 rtsx
->trans_result
= TRANS_NOT_READY
;
533 spin_unlock_irq(&rtsx
->reg_lock
);
535 buf_cnt
= dma_map_sg(&rtsx
->pci
->dev
, sg
, num_sg
, dma_dir
);
539 for (i
= 0; i
<= buf_cnt
/ (HOST_SG_TBL_BUF_LEN
/ 8); i
++) {
543 if (i
== buf_cnt
/ (HOST_SG_TBL_BUF_LEN
/ 8))
544 sg_cnt
= buf_cnt
% (HOST_SG_TBL_BUF_LEN
/ 8);
546 sg_cnt
= HOST_SG_TBL_BUF_LEN
/ 8;
549 for (j
= 0; j
< sg_cnt
; j
++) {
550 dma_addr_t addr
= sg_dma_address(sg_ptr
);
551 unsigned int len
= sg_dma_len(sg_ptr
);
554 dev_dbg(rtsx_dev(chip
), "DMA addr: 0x%x, Len: 0x%x\n",
555 (unsigned int)addr
, len
);
557 if (j
== (sg_cnt
- 1))
558 option
= SG_VALID
| SG_END
| SG_TRANS_DATA
;
560 option
= SG_VALID
| SG_TRANS_DATA
;
562 rtsx_add_sg_tbl(chip
, (u32
)addr
, (u32
)len
, option
);
564 sg_ptr
= sg_next(sg_ptr
);
567 dev_dbg(rtsx_dev(chip
), "SG table count = %d\n", chip
->sgi
);
569 val
|= (u32
)(dir
& 0x01) << 29;
572 spin_lock_irq(&rtsx
->reg_lock
);
574 init_completion(&trans_done
);
576 rtsx_writel(chip
, RTSX_HDBAR
, chip
->host_sg_tbl_addr
);
577 rtsx_writel(chip
, RTSX_HDBCTLR
, val
);
579 spin_unlock_irq(&rtsx
->reg_lock
);
581 timeleft
= wait_for_completion_interruptible_timeout(
582 &trans_done
, msecs_to_jiffies(timeout
));
584 dev_dbg(rtsx_dev(chip
), "Timeout (%s %d)\n",
586 dev_dbg(rtsx_dev(chip
), "chip->int_reg = 0x%x\n",
592 spin_lock_irq(&rtsx
->reg_lock
);
593 if (rtsx
->trans_result
== TRANS_RESULT_FAIL
) {
595 spin_unlock_irq(&rtsx
->reg_lock
);
598 spin_unlock_irq(&rtsx
->reg_lock
);
603 /* Wait for TRANS_OK_INT */
604 spin_lock_irq(&rtsx
->reg_lock
);
605 if (rtsx
->trans_result
== TRANS_NOT_READY
) {
606 init_completion(&trans_done
);
607 spin_unlock_irq(&rtsx
->reg_lock
);
608 timeleft
= wait_for_completion_interruptible_timeout(
609 &trans_done
, msecs_to_jiffies(timeout
));
611 dev_dbg(rtsx_dev(chip
), "Timeout (%s %d)\n",
613 dev_dbg(rtsx_dev(chip
), "chip->int_reg = 0x%x\n",
619 spin_unlock_irq(&rtsx
->reg_lock
);
622 spin_lock_irq(&rtsx
->reg_lock
);
623 if (rtsx
->trans_result
== TRANS_RESULT_FAIL
)
625 else if (rtsx
->trans_result
== TRANS_RESULT_OK
)
628 spin_unlock_irq(&rtsx
->reg_lock
);
632 rtsx
->trans_state
= STATE_TRANS_NONE
;
633 dma_unmap_sg(&rtsx
->pci
->dev
, sg
, num_sg
, dma_dir
);
636 rtsx_stop_cmd(chip
, card
);
641 static int rtsx_transfer_buf(struct rtsx_chip
*chip
, u8 card
, void *buf
,
642 size_t len
, enum dma_data_direction dma_dir
,
645 struct rtsx_dev
*rtsx
= chip
->rtsx
;
646 struct completion trans_done
;
653 if (!buf
|| (len
<= 0))
656 if (dma_dir
== DMA_TO_DEVICE
)
657 dir
= HOST_TO_DEVICE
;
658 else if (dma_dir
== DMA_FROM_DEVICE
)
659 dir
= DEVICE_TO_HOST
;
663 addr
= dma_map_single(&rtsx
->pci
->dev
, buf
, len
, dma_dir
);
664 if (dma_mapping_error(&rtsx
->pci
->dev
, addr
))
668 rtsx
->check_card_cd
= SD_EXIST
;
669 else if (card
== MS_CARD
)
670 rtsx
->check_card_cd
= MS_EXIST
;
671 else if (card
== XD_CARD
)
672 rtsx
->check_card_cd
= XD_EXIST
;
674 rtsx
->check_card_cd
= 0;
676 val
|= (u32
)(dir
& 0x01) << 29;
677 val
|= (u32
)(len
& 0x00FFFFFF);
679 spin_lock_irq(&rtsx
->reg_lock
);
681 /* set up data structures for the wakeup system */
682 rtsx
->done
= &trans_done
;
684 init_completion(&trans_done
);
686 rtsx
->trans_state
= STATE_TRANS_BUF
;
687 rtsx
->trans_result
= TRANS_NOT_READY
;
689 rtsx_writel(chip
, RTSX_HDBAR
, addr
);
690 rtsx_writel(chip
, RTSX_HDBCTLR
, val
);
692 spin_unlock_irq(&rtsx
->reg_lock
);
694 /* Wait for TRANS_OK_INT */
695 timeleft
= wait_for_completion_interruptible_timeout(
696 &trans_done
, msecs_to_jiffies(timeout
));
698 dev_dbg(rtsx_dev(chip
), "Timeout (%s %d)\n",
700 dev_dbg(rtsx_dev(chip
), "chip->int_reg = 0x%x\n",
706 spin_lock_irq(&rtsx
->reg_lock
);
707 if (rtsx
->trans_result
== TRANS_RESULT_FAIL
)
709 else if (rtsx
->trans_result
== TRANS_RESULT_OK
)
712 spin_unlock_irq(&rtsx
->reg_lock
);
716 rtsx
->trans_state
= STATE_TRANS_NONE
;
717 dma_unmap_single(&rtsx
->pci
->dev
, addr
, len
, dma_dir
);
720 rtsx_stop_cmd(chip
, card
);
725 int rtsx_transfer_data_partial(struct rtsx_chip
*chip
, u8 card
,
726 void *buf
, size_t len
, int use_sg
,
727 unsigned int *index
, unsigned int *offset
,
728 enum dma_data_direction dma_dir
, int timeout
)
732 /* don't transfer data during abort processing */
733 if (rtsx_chk_stat(chip
, RTSX_STAT_ABORT
))
737 struct scatterlist
*sg
= buf
;
739 err
= rtsx_transfer_sglist_adma_partial(chip
, card
, sg
, use_sg
,
740 index
, offset
, (int)len
,
743 err
= rtsx_transfer_buf(chip
, card
,
744 buf
, len
, dma_dir
, timeout
);
747 if (RTSX_TST_DELINK(chip
)) {
748 RTSX_CLR_DELINK(chip
);
749 chip
->need_reinit
= SD_CARD
| MS_CARD
| XD_CARD
;
750 rtsx_reinit_cards(chip
, 1);
757 int rtsx_transfer_data(struct rtsx_chip
*chip
, u8 card
, void *buf
, size_t len
,
758 int use_sg
, enum dma_data_direction dma_dir
, int timeout
)
762 dev_dbg(rtsx_dev(chip
), "use_sg = %d\n", use_sg
);
764 /* don't transfer data during abort processing */
765 if (rtsx_chk_stat(chip
, RTSX_STAT_ABORT
))
769 err
= rtsx_transfer_sglist_adma(chip
, card
,
770 (struct scatterlist
*)buf
,
771 use_sg
, dma_dir
, timeout
);
773 err
= rtsx_transfer_buf(chip
, card
, buf
, len
, dma_dir
, timeout
);
777 if (RTSX_TST_DELINK(chip
)) {
778 RTSX_CLR_DELINK(chip
);
779 chip
->need_reinit
= SD_CARD
| MS_CARD
| XD_CARD
;
780 rtsx_reinit_cards(chip
, 1);