/* Copyright 2012 STEC, Inc.
 *
 * This file is licensed under the terms of the 3-clause
 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
 * at your option. Both licenses are also available in the LICENSE file
 * distributed with this project. This file may not be copied, modified,
 * or distributed except in accordance with those terms.
 *
 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
 *	Initial Driver Design!
 * Thomas Swann <tswann@stec-inc.com>
 *	Interrupt handling.
 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
 *	biomode implementation.
 * Akhil Bhansali <abhansali@stec-inc.com>
 *	Added support for DISCARD / FLUSH and FUA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/ctype.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"
static int skd_dbg_level;
static int skd_isr_comp_limit = 4;
enum {
	STEC_LINK_2_5GTS = 0,
	STEC_LINK_5GTS = 1,
	STEC_LINK_8GTS = 2,
	STEC_LINK_UNKNOWN = 0xFF
};
enum {
	SKD_FLUSH_INITIALIZER,
	SKD_FLUSH_ZERO_SIZE_FIRST,
	SKD_FLUSH_DATA_SECOND,
};
#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)
#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
#define DRV_BIN_VERSION 0x100
#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
MODULE_AUTHOR("bug-reports: support@stec-inc.com");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
#define PCI_VENDOR_ID_STEC		0x1B39
#define PCI_DEVICE_ID_S1120		0x0001

#define SKD_FUA_NV			(1 << 1)
#define SKD_MINORS_PER_DEVICE		16

#define SKD_MAX_QUEUE_DEPTH		200u

#define SKD_PAUSE_TIMEOUT		(5 * 1000)

#define SKD_N_FITMSG_BYTES		(512u)

#define SKD_N_SPECIAL_CONTEXT		32u
#define SKD_N_SPECIAL_FITMSG_BYTES	(128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT	256u
#define SKD_N_SG_PER_SPECIAL		256u

#define SKD_N_COMPLETION_ENTRY		256u
#define SKD_N_READ_CAP_BYTES		(8u)

#define SKD_N_INTERNAL_BYTES		(512u)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR			(0x400)
#define SKD_ID_TABLE_MASK		(3u << 8u)
#define  SKD_ID_RW_REQUEST		(0u << 8u)
#define  SKD_ID_INTERNAL		(1u << 8u)
#define  SKD_ID_SPECIAL_REQUEST		(2u << 8u)
#define  SKD_ID_FIT_MSG			(3u << 8u)
#define SKD_ID_SLOT_MASK		0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK	0x03FFu
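/*
 * A context id packs three fields: the low byte is a slot index, bits
 * 9:8 select which table the slot belongs to (r/w request, internal,
 * special or FIT message), and the remaining high bits act as a
 * uniquifier that is advanced by SKD_ID_INCR each time a slot is
 * reused, so a completion for a stale use of a slot can be detected.
 */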
#define SKD_N_TIMEOUT_SLOT		4u
#define SKD_TIMEOUT_SLOT_MASK		3u
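/*
 * In-flight requests are tallied in one of four timeout slots chosen
 * by the low bits of the issue-time timestamp. The timestamp advances
 * once per timer tick, so by the time a slot comes up for reuse any
 * request still counted in it is several seconds overdue.
 */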
#define SKD_N_MAX_SECTORS		2048u

#define SKD_MAX_RETRIES			2u

#define SKD_TIMER_SECONDS(seconds)	(seconds)
#define SKD_TIMER_MINUTES(minutes)	((minutes) * (60))

#define INQ_STD_NBYTES			36
enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_DRAINING_TIMEOUT,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};
#define SKD_WAIT_BOOT_TIMO	SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO	SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO	SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO	SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO		SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO	SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS	90u
enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
	SKD_REQ_STATE_ABORTED,
};
enum skd_fit_msg_state {
	SKD_MSG_STATE_IDLE,
	SKD_MSG_STATE_BUSY,
};
enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};
struct skd_fitmsg_context {
	enum skd_fit_msg_state state;

	struct skd_fitmsg_context *next;

	u32 id;
	u16 outstanding;

	u32 length;
	u32 offset;

	u8 *msg_buf;
	dma_addr_t mb_dma_address;
};
struct skd_request_context {
	enum skd_req_state state;

	struct skd_request_context *next;

	u16 id;
	u32 fitmsg_id;

	struct request *req;
	u8 flush_cmd;

	u32 timeout_stamp;
	u8 sg_data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;
};
#define SKD_DATA_DIR_HOST_TO_CARD	1
#define SKD_DATA_DIR_CARD_TO_HOST	2
struct skd_special_context {
	struct skd_request_context req;

	u8 orphaned;

	void *data_buf;
	dma_addr_t db_dma_address;

	u8 *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_sg_io {
	fmode_t mode;
	void __user *argp;

	struct sg_io_hdr sg;

	u8 cdb[16];

	u32 dxfer_len;
	u32 iovcnt;
	struct sg_iovec *iov;
	struct sg_iovec no_iov_iov;

	struct skd_special_context *skspcl;
};
typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS	2
struct skd_device {
	volatile void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct request_queue *queue;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	atomic_t device_count;
	u32 devno;
	u32 major;
	char name[32];
	char isr_name[30];

	enum skd_drvr_state state;
	u32 drive_state;

	u32 in_flight;
	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
	u32 timeout_stamp;
	struct skd_fitmsg_context *skmsg_free_list;
	struct skd_fitmsg_context *skmsg_table;

	struct skd_request_context *skreq_free_list;
	struct skd_request_context *skreq_table;

	struct skd_special_context *skspcl_free_list;
	struct skd_special_context *skspcl_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13];	/*12 chars plus null term */
	u8 id_str[80];		/* holds a composite name (pci + sernum) */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;

	int sgs_per_request;

	int dbg_level;
	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	u32 timo_slot;

	struct work_struct completion_worker;
};
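/*
 * Request, FIT message and special contexts are preallocated arrays
 * (the *_table pointers) threaded onto singly linked free lists, so
 * allocation in the I/O path is just a list-head pop under skdev->lock.
 */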
#define SKD_WRITEL(DEV, VAL, OFF)	skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF)		skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF)	skd_reg_write64(DEV, VAL, OFF)
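/*
 * All chip registers are accessed through BAR 1. The accessors below
 * take a fast path when dbg_level < 2; otherwise they add barriers and
 * trace every register access.
 */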
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val;

	if (likely(skdev->dbg_level < 2))
		return readl(skdev->mem_map[1] + offset);
	else {
		barrier();
		val = readl(skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %x\n",
			 skdev->name, __func__, __LINE__, offset, val);
		return val;
	}
}
static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	if (likely(skdev->dbg_level < 2)) {
		writel(val, skdev->mem_map[1] + offset);
		barrier();
	} else {
		barrier();
		writel(val, skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %x\n",
			 skdev->name, __func__, __LINE__, offset, val);
	}
}
static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	if (likely(skdev->dbg_level < 2)) {
		writeq(val, skdev->mem_map[1] + offset);
		barrier();
	} else {
		barrier();
		writeq(val, skdev->mem_map[1] + offset);
		barrier();
		pr_debug("%s:%s:%d offset %x = %016llx\n",
			 skdev->name, __func__, __LINE__, offset, val);
	}
}
#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-14, default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
/* Major device number dynamically assigned. */
static u32 skd_major;
static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
			    struct skd_request_context *skreq,
			    blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_release_special(struct skd_device *skdev,
				struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev, int requeue);
static void skd_soft_reset(struct skd_device *skdev);

static const char *skd_name(struct skd_device *skdev);
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);
/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
{
	struct request_queue *q = skdev->queue;
	struct request *req;

	for (;; ) {
		req = blk_peek_request(q);
		if (req == NULL)
			break;
		blk_start_request(req);
		__blk_end_request_all(req, BLK_STS_IOERR);
	}
}
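/*
 * skd_prep_rw_cdb() below builds a 10-byte SCSI READ(10)/WRITE(10)
 * CDB: opcode 0x28 or 0x2a, a big-endian 32-bit LBA in bytes 2-5 and
 * a big-endian 16-bit sector count in bytes 7-8.
 */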
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = 0x28;
	else
		scsi_req->cdb[0] = 0x2a;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}
static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = 0x35;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}
static void skd_request_fn_not_online(struct request_queue *q);
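/*
 * skd_request_fn() drains the block layer queue, packing up to
 * skd_max_req_per_msg SoFIT commands into one FIT message before
 * ringing the doorbell. It bails out early when the device is
 * saturated or a context/message buffer runs dry, in which case the
 * queue is stopped until completions free resources.
 */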
static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	unsigned long io_flags;
	u32 lba;
	u32 count;
	int data_dir;
	u32 be_lba;
	u32 be_count;
	u64 be_dmaa;
	u64 cmdctxt;
	u32 timo_slot;
	void *cmd_ptr;
	int flush, fua;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_request_fn_not_online(q);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (skdev->skmsg_free_list == NULL ||
		    skdev->skreq_free_list == NULL ||
		    skdev->in_flight >= skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;; ) {

		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (req_op(req) == REQ_OP_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
			 "count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (skdev->in_flight >= skdev->cur_max_queue_depth) {
			pr_debug("%s:%s:%d qdepth %d, limit %d\n",
				 skdev->name, __func__, __LINE__,
				 skdev->in_flight, skdev->cur_max_queue_depth);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			pr_debug("%s:%s:%d Out of req=%p\n",
				 skdev->name, __func__, __LINE__, q);
			break;
		}
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		/* Now we check to see if we can get a fit msg */
		if (skmsg == NULL) {
			if (skdev->skmsg_free_list == NULL) {
				pr_debug("%s:%s:%d Out of msg\n",
					 skdev->name, __func__, __LINE__);
				break;
			}
		}

		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		blk_start_request(req);

		skreq->req = req;
		skreq->fitmsg_id = 0;

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				pr_debug("%s:%s:%d Out of msg skdev=%p\n",
					 skdev->name, __func__, __LINE__,
					 skdev);
				break;
			}
			SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		/*
		 * Note that a FIT msg may have just been started
		 * but contains no SoFIT requests yet.
		 */

		/*
		 * Transcode the request, checking as we go. The outcome of
		 * the transcoding is represented by the error variable.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		memset(cmd_ptr, 0, 32);

		be_lba = cpu_to_be32(lba);
		be_count = cpu_to_be32(count);
		be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (data_dir == READ)
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		else
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

		if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);
		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		if (!req->bio)
			goto skip_sg;

		if (!skd_preop_sg_list(skdev, skreq)) {
			/*
			 * Complete the native request with error.
			 * Note that the request context is still at the
			 * head of the free list, and that the SoFIT request
			 * was encoded into the FIT msg buffer but the FIT
			 * msg length has not been updated. In short, the
			 * only resource that has been allocated but might
			 * not be used is that the FIT msg could be empty.
			 */
			pr_debug("%s:%s:%d error Out\n",
				 skdev->name, __func__, __LINE__);
			skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
			continue;
		}

skip_sg:
		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skdev->skreq_free_list = skreq->next;
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		skdev->timeout_slot[timo_slot]++;
		skdev->in_flight++;
		pr_debug("%s:%s:%d req=0x%x busy=%d\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skdev->in_flight);

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		/* Bigger than just a FIT msg header? */
		if (skmsg->length > sizeof(struct fit_msg_hdr)) {
			pr_debug("%s:%s:%d sending msg=%p, len %d\n",
				 skdev->name, __func__, __LINE__,
				 skmsg, skmsg->length);
			skd_send_fitmsg(skdev, skmsg);
		} else {
			/*
			 * The FIT msg is empty. It means we got started
			 * on the msg, but the requests were rejected.
			 */
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
			skmsg->next = skdev->skmsg_free_list;
			skdev->skmsg_free_list = skmsg;
		}
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}
static void skd_end_request(struct skd_device *skdev,
			    struct skd_request_context *skreq,
			    blk_status_t error)
{
	if (unlikely(error)) {
		struct request *req = skreq->req;
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
		       skd_name(skdev), cmd, lba, count, skreq->id);
	} else
		pr_debug("%s:%s:%d id=0x%x error=%d\n",
			 skdev->name, __func__, __LINE__, skreq->id, error);

	__blk_end_request_all(skreq->req, error);
}
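/*
 * skd_preop_sg_list()/skd_postop_sg_list() bracket a request's data
 * phase: preop maps the request into a scatterlist, DMA-maps it and
 * mirrors it into the FIT SG descriptor list; postop unmaps the DMA
 * and restores the descriptor chain for the next use.
 */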
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = skreq->req;
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
	struct scatterlist *sg = &skreq->sg[0];
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	/* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
		     skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

	n_sg = blk_rq_map_sg(skdev->queue, req, sg);
	if (n_sg <= 0)
		return false;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for (i = 0; i < n_sg; i++) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(&sg[i]);
		uint64_t dma_addr = sg_dma_address(&sg[i]);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
				 "addr=0x%llx next=0x%llx\n",
				 skdev->name, __func__, __LINE__,
				 i, sgd->byte_count, sgd->control,
				 sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return true;
}
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
}
static void skd_request_fn_not_online(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
	/* In case of starting, we haven't started the queue,
	 * so we can't get here... but requests are
	 * possibly hanging out waiting for us because we
	 * reported the dev/skd0 already. They'll wait
	 * forever if connect doesn't complete.
	 * What to do??? delay dev/skd0 ??
	 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}

	/* If we get here, terminate all pending block requests
	 * with EIO and any scsi pass thru with appropriate sense
	 */

	skd_fail_all_pending(skdev);
}
/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */
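/*
 * The driver runs a one-second timer. Each tick re-reads the drive
 * state register, advances the timeout timestamp and, if the timeout
 * slot about to be reused still holds outstanding requests, enters
 * DRAINING_TIMEOUT and stops the queue until the slot clears or the
 * drive is restarted.
 */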
static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;

	u32 timo_slot;
	u32 overdue_timestamp;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}
	skdev->timeout_stamp++;
	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * over 7 seconds ago.
	 */
	if (skdev->timeout_slot[timo_slot] == 0)
		goto timer_func_out;

	/* Something is overdue */
	overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;

	pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_slot[timo_slot], skdev->in_flight);
	pr_err("(%s): Overdue IOs (%d), busy %d\n",
	       skd_name(skdev), skdev->timeout_slot[timo_slot],
	       skdev->in_flight);

	skdev->timer_countdown = SKD_DRAINING_TIMO;
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;
	blk_stop_queue(skdev->queue);

timer_func_out:
	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}
static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
			 skdev->name, __func__, __LINE__,
			 skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev, 0);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
			 skdev->name, __func__, __LINE__,
			 skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;

		pr_err("(%s): DriveFault Connect Timeout (%x)\n",
		       skd_name(skdev), skdev->drive_state);

		/*start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		pr_debug("%s:%s:%d "
			 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
			 skdev->name, __func__, __LINE__,
			 skdev->timo_slot,
			 skdev->timer_countdown,
			 skdev->in_flight,
			 skdev->timeout_slot[skdev->timo_slot]);
		/* if the slot has cleared we can let the I/O continue */
		if (skdev->timeout_slot[skdev->timo_slot] == 0) {
			pr_debug("%s:%s:%d Slot drained, starting queue.\n",
				 skdev->name, __func__, __LINE__);
			skdev->state = SKD_DRVR_STATE_ONLINE;
			blk_start_queue(skdev->queue);
			return;
		}
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;
		pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
		       skd_name(skdev), skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after out timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes. */
			skd_recover_requests(skdev, 0);
		else {
			pr_err("(%s): Disable BusMaster (%x)\n",
			       skd_name(skdev), skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev, 0);
		}

		/*start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}
static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	init_timer(&skdev->timer);
	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		pr_err("%s: failed to start timer %d\n",
		       __func__, rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}
/*
 *****************************************************************************
 * IOCTL
 *****************************************************************************
 */
static int skd_ioctl_sg_io(struct skd_device *skdev,
			   fmode_t mode, void __user *argp);
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
					struct skd_sg_io *sksgio);
static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
				   struct skd_sg_io *sksgio);
static int skd_sg_io_prep_buffering(struct skd_device *skdev,
				    struct skd_sg_io *sksgio);
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
				 struct skd_sg_io *sksgio, int dxfer_dir);
static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
				 struct skd_sg_io *sksgio);
static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
static int skd_sg_io_release_skspcl(struct skd_device *skdev,
				    struct skd_sg_io *sksgio);
static int skd_sg_io_put_status(struct skd_device *skdev,
				struct skd_sg_io *sksgio);

static void skd_complete_special(struct skd_device *skdev,
				 volatile struct fit_completion_entry_v1
				 *skcomp,
				 volatile struct fit_comp_error_info *skerr,
				 struct skd_special_context *skspcl);
static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
			  uint cmd_in, ulong arg)
{
	static const int sg_version_num = 30527;
	int rc = 0, timeout;
	struct gendisk *disk = bdev->bd_disk;
	struct skd_device *skdev = disk->private_data;
	int __user *p = (int __user *)arg;

	pr_debug("%s:%s:%d %s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
		 skdev->name, __func__, __LINE__,
		 disk->disk_name, current->comm, mode, cmd_in, arg);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd_in) {
	case SG_SET_TIMEOUT:
		rc = get_user(timeout, p);
		if (!rc)
			disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
		break;
	case SG_GET_TIMEOUT:
		rc = jiffies_to_clock_t(disk->queue->sg_timeout);
		break;
	case SG_GET_VERSION_NUM:
		rc = put_user(sg_version_num, p);
		break;
	case SG_IO:
		rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	pr_debug("%s:%s:%d %s:  completion rc %d\n",
		 skdev->name, __func__, __LINE__, disk->disk_name, rc);

	return rc;
}
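/*
 * skd_ioctl_sg_io() runs the pass-through in fixed stages: validate
 * the user's sg_io_hdr, claim a special context, build a bounce
 * buffer, copy data in for writes, send the FIT message, wait for
 * completion, copy data out for reads and write status/sense back to
 * user space. Resources are released on every exit path.
 */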
static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
			   void __user *argp)
{
	int rc;
	struct skd_sg_io sksgio;

	memset(&sksgio, 0, sizeof(sksgio));
	sksgio.mode = mode;
	sksgio.argp = argp;
	sksgio.iov = &sksgio.no_iov_iov;

	switch (skdev->state) {
	case SKD_DRVR_STATE_ONLINE:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		break;

	default:
		pr_debug("%s:%s:%d drive not online\n",
			 skdev->name, __func__, __LINE__);
		rc = -ENXIO;
		goto out;
	}

	rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
	if (rc)
		goto out;

	rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
	if (rc)
		goto out;

	rc = skd_sg_io_prep_buffering(skdev, &sksgio);
	if (rc)
		goto out;

	rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
	if (rc)
		goto out;

	rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
	if (rc)
		goto out;

	rc = skd_sg_io_await(skdev, &sksgio);
	if (rc)
		goto out;

	rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
	if (rc)
		goto out;

	rc = skd_sg_io_put_status(skdev, &sksgio);
	if (rc)
		goto out;

	rc = 0;

out:
	skd_sg_io_release_skspcl(skdev, &sksgio);

	if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
		kfree(sksgio.iov);
	return rc;
}
*skdev
,
1309 struct skd_sg_io
*sksgio
)
1311 struct sg_io_hdr
*sgp
= &sksgio
->sg
;
1314 if (!access_ok(VERIFY_WRITE
, sksgio
->argp
, sizeof(sg_io_hdr_t
))) {
1315 pr_debug("%s:%s:%d access sg failed %p\n",
1316 skdev
->name
, __func__
, __LINE__
, sksgio
->argp
);
1320 if (__copy_from_user(sgp
, sksgio
->argp
, sizeof(sg_io_hdr_t
))) {
1321 pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1322 skdev
->name
, __func__
, __LINE__
, sksgio
->argp
);
1326 if (sgp
->interface_id
!= SG_INTERFACE_ID_ORIG
) {
1327 pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1328 skdev
->name
, __func__
, __LINE__
, sgp
->interface_id
);
1332 if (sgp
->cmd_len
> sizeof(sksgio
->cdb
)) {
1333 pr_debug("%s:%s:%d cmd_len invalid %d\n",
1334 skdev
->name
, __func__
, __LINE__
, sgp
->cmd_len
);
1338 if (sgp
->iovec_count
> 256) {
1339 pr_debug("%s:%s:%d iovec_count invalid %d\n",
1340 skdev
->name
, __func__
, __LINE__
, sgp
->iovec_count
);
1344 if (sgp
->dxfer_len
> (PAGE_SIZE
* SKD_N_SG_PER_SPECIAL
)) {
1345 pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1346 skdev
->name
, __func__
, __LINE__
, sgp
->dxfer_len
);
1350 switch (sgp
->dxfer_direction
) {
1355 case SG_DXFER_TO_DEV
:
1359 case SG_DXFER_FROM_DEV
:
1360 case SG_DXFER_TO_FROM_DEV
:
1365 pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1366 skdev
->name
, __func__
, __LINE__
, sgp
->dxfer_direction
);
1370 if (copy_from_user(sksgio
->cdb
, sgp
->cmdp
, sgp
->cmd_len
)) {
1371 pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1372 skdev
->name
, __func__
, __LINE__
, sgp
->cmdp
);
1376 if (sgp
->mx_sb_len
!= 0) {
1377 if (!access_ok(VERIFY_WRITE
, sgp
->sbp
, sgp
->mx_sb_len
)) {
1378 pr_debug("%s:%s:%d access sbp failed %p\n",
1379 skdev
->name
, __func__
, __LINE__
, sgp
->sbp
);
1384 if (sgp
->iovec_count
== 0) {
1385 sksgio
->iov
[0].iov_base
= sgp
->dxferp
;
1386 sksgio
->iov
[0].iov_len
= sgp
->dxfer_len
;
1388 sksgio
->dxfer_len
= sgp
->dxfer_len
;
1390 struct sg_iovec
*iov
;
1391 uint nbytes
= sizeof(*iov
) * sgp
->iovec_count
;
1392 size_t iov_data_len
;
1394 iov
= kmalloc(nbytes
, GFP_KERNEL
);
1396 pr_debug("%s:%s:%d alloc iovec failed %d\n",
1397 skdev
->name
, __func__
, __LINE__
,
1402 sksgio
->iovcnt
= sgp
->iovec_count
;
1404 if (copy_from_user(iov
, sgp
->dxferp
, nbytes
)) {
1405 pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1406 skdev
->name
, __func__
, __LINE__
, sgp
->dxferp
);
1411 * Sum up the vecs, making sure they don't overflow
1414 for (i
= 0; i
< sgp
->iovec_count
; i
++) {
1415 if (iov_data_len
+ iov
[i
].iov_len
< iov_data_len
)
1417 iov_data_len
+= iov
[i
].iov_len
;
1420 /* SG_IO howto says that the shorter of the two wins */
1421 if (sgp
->dxfer_len
< iov_data_len
) {
1422 sksgio
->iovcnt
= iov_shorten((struct iovec
*)iov
,
1425 sksgio
->dxfer_len
= sgp
->dxfer_len
;
1427 sksgio
->dxfer_len
= iov_data_len
;
1430 if (sgp
->dxfer_direction
!= SG_DXFER_NONE
) {
1431 struct sg_iovec
*iov
= sksgio
->iov
;
1432 for (i
= 0; i
< sksgio
->iovcnt
; i
++, iov
++) {
1433 if (!access_ok(acc
, iov
->iov_base
, iov
->iov_len
)) {
1434 pr_debug("%s:%s:%d access data failed %p/%d\n",
1435 skdev
->name
, __func__
, __LINE__
,
1436 iov
->iov_base
, (int)iov
->iov_len
);
static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
				   struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = NULL;
	int rc;

	for (;;) {
		ulong flags;

		spin_lock_irqsave(&skdev->lock, flags);
		skspcl = skdev->skspcl_free_list;
		if (skspcl != NULL) {
			skdev->skspcl_free_list =
				(struct skd_special_context *)skspcl->req.next;
			skspcl->req.id += SKD_ID_INCR;
			skspcl->req.state = SKD_REQ_STATE_SETUP;
			skspcl->orphaned = 0;
			skspcl->req.n_sg = 0;
		}
		spin_unlock_irqrestore(&skdev->lock, flags);

		if (skspcl != NULL) {
			rc = 0;
			break;
		}

		pr_debug("%s:%s:%d blocking\n",
			 skdev->name, __func__, __LINE__);

		rc = wait_event_interruptible_timeout(
				skdev->waitq,
				(skdev->skspcl_free_list != NULL),
				msecs_to_jiffies(sksgio->sg.timeout));

		pr_debug("%s:%s:%d unblocking, rc=%d\n",
			 skdev->name, __func__, __LINE__, rc);

		if (rc <= 0) {
			if (rc == 0)
				rc = -ETIMEDOUT;
			else
				rc = -EINTR;
			break;
		}
		/*
		 * If we get here rc > 0 meaning the timeout to
		 * wait_event_interruptible_timeout() had time left, hence the
		 * sought event -- non-empty free list -- happened.
		 * Retry the allocation.
		 */
	}
	sksgio->skspcl = skspcl;

	return rc;
}
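/*
 * The DMA engine below requires 4-byte-aligned byte counts, so the
 * transfer length is rounded up with resid += (-resid) & 3; e.g. a
 * 4097-byte transfer becomes 4100 bytes since (-4097) & 3 == 3.
 */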
static int skd_skreq_prep_buffering(struct skd_device *skdev,
				    struct skd_request_context *skreq,
				    u32 dxfer_len)
{
	u32 resid = dxfer_len;

	/*
	 * The DMA engine must have aligned addresses and byte counts.
	 */
	resid += (-resid) & 3;
	skreq->sg_byte_count = resid;

	skreq->n_sg = 0;

	while (resid > 0) {
		u32 nbytes = PAGE_SIZE;
		u32 ix = skreq->n_sg;
		struct scatterlist *sg = &skreq->sg[ix];
		struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
		struct page *page;

		if (nbytes > resid)
			nbytes = resid;

		page = alloc_page(GFP_KERNEL);
		if (page == NULL)
			return -ENOMEM;

		sg_set_page(sg, page, nbytes, 0);

		/* TODO: This should be going through a pci_???()
		 * routine to do proper mapping. */
		sksg->control = FIT_SGD_CONTROL_NOT_LAST;
		sksg->byte_count = nbytes;

		sksg->host_side_addr = sg_phys(sg);

		sksg->dev_side_addr = 0;
		sksg->next_desc_ptr = skreq->sksg_dma_address +
				      (ix + 1) * sizeof(*sksg);

		skreq->n_sg++;
		resid -= nbytes;
	}

	if (skreq->n_sg > 0) {
		u32 ix = skreq->n_sg - 1;
		struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];

		sksg->control = FIT_SGD_CONTROL_LAST;
		sksg->next_desc_ptr = 0;
	}

	if (unlikely(skdev->dbg_level > 1)) {
		u32 i;

		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
			 skdev->name, __func__, __LINE__,
			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < skreq->n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
				 "addr=0x%llx next=0x%llx\n",
				 skdev->name, __func__, __LINE__,
				 i, sgd->byte_count, sgd->control,
				 sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return 0;
}
static int skd_sg_io_prep_buffering(struct skd_device *skdev,
				    struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = sksgio->skspcl;
	struct skd_request_context *skreq = &skspcl->req;
	u32 dxfer_len = sksgio->dxfer_len;
	int rc;

	rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
	/*
	 * Eventually, errors or not, skd_release_special() is called
	 * to recover allocations including partial allocations.
	 */
	return rc;
}
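/*
 * skd_sg_io_copy_buffer() walks the user iovec list and the bounce
 * pages in lock step; each pass copies min(resid, remaining iovec,
 * remaining page) bytes and refills whichever side ran dry.
 */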
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
				 struct skd_sg_io *sksgio, int dxfer_dir)
{
	struct skd_special_context *skspcl = sksgio->skspcl;
	u32 iov_ix = 0;
	struct sg_iovec curiov;
	u32 sksg_ix = 0;
	u8 *bufp = NULL;
	u32 buf_len = 0;
	u32 resid = sksgio->dxfer_len;
	int rc;

	curiov.iov_len = 0;
	curiov.iov_base = NULL;

	if (dxfer_dir != sksgio->sg.dxfer_direction) {
		if (dxfer_dir != SG_DXFER_TO_DEV ||
		    sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
			return 0;
	}

	while (resid > 0) {
		u32 nbytes = PAGE_SIZE;

		if (curiov.iov_len == 0) {
			curiov = sksgio->iov[iov_ix++];
			continue;
		}

		if (buf_len == 0) {
			struct page *page;

			page = sg_page(&skspcl->req.sg[sksg_ix++]);
			bufp = page_address(page);
			buf_len = PAGE_SIZE;
		}

		nbytes = min_t(u32, nbytes, resid);
		nbytes = min_t(u32, nbytes, curiov.iov_len);
		nbytes = min_t(u32, nbytes, buf_len);

		if (dxfer_dir == SG_DXFER_TO_DEV)
			rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
		else
			rc = __copy_to_user(curiov.iov_base, bufp, nbytes);

		if (rc)
			return -EFAULT;

		resid -= nbytes;
		curiov.iov_len -= nbytes;
		curiov.iov_base += nbytes;
		bufp += nbytes;
		buf_len -= nbytes;
	}

	return 0;
}
static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
				 struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = sksgio->skspcl;
	struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
	struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];

	memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);

	/* Initialize the FIT msg header */
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	/* Initialize the SCSI request */
	if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
		scsi_req->hdr.sg_list_dma_address =
			cpu_to_be64(skspcl->req.sksg_dma_address);
	scsi_req->hdr.tag = skspcl->req.id;
	scsi_req->hdr.sg_list_len_bytes =
		cpu_to_be32(skspcl->req.sg_byte_count);
	memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));

	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skd_send_special_fitmsg(skdev, skspcl);

	return 0;
}
static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
{
	unsigned long flags;
	int rc;

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (sksgio->skspcl->req.state !=
					       SKD_REQ_STATE_BUSY),
					      msecs_to_jiffies(sksgio->sg.
							       timeout));

	spin_lock_irqsave(&skdev->lock, flags);

	if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
		pr_debug("%s:%s:%d skspcl %p aborted\n",
			 skdev->name, __func__, __LINE__, sksgio->skspcl);

		/* Build check cond, sense and let command finish. */
		/* For a timeout, we must fabricate completion and sense
		 * data to complete the command */
		sksgio->skspcl->req.completion.status =
			SAM_STAT_CHECK_CONDITION;

		memset(&sksgio->skspcl->req.err_info, 0,
		       sizeof(sksgio->skspcl->req.err_info));
		sksgio->skspcl->req.err_info.type = 0x70;
		sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
		sksgio->skspcl->req.err_info.code = 0x44;
		sksgio->skspcl->req.err_info.qual = 0;
		rc = 0;
	} else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
		/* No longer on the adapter. We finish. */
		rc = 0;
	else {
		/* Something's gone wrong. Still busy. Timeout or
		 * user interrupted (control-C). Mark as an orphan
		 * so it will be disposed when completed. */
		sksgio->skspcl->orphaned = 1;
		sksgio->skspcl = NULL;
		if (rc == 0) {
			pr_debug("%s:%s:%d timed out %p (%u ms)\n",
				 skdev->name, __func__, __LINE__,
				 sksgio, sksgio->sg.timeout);
			rc = -ETIMEDOUT;
		} else {
			pr_debug("%s:%s:%d cntlc %p\n",
				 skdev->name, __func__, __LINE__, sksgio);
			rc = -EINTR;
		}
	}

	spin_unlock_irqrestore(&skdev->lock, flags);

	return rc;
}
static int skd_sg_io_put_status(struct skd_device *skdev,
				struct skd_sg_io *sksgio)
{
	struct sg_io_hdr *sgp = &sksgio->sg;
	struct skd_special_context *skspcl = sksgio->skspcl;
	int resid = 0;

	u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);

	sgp->status = skspcl->req.completion.status;
	resid = sksgio->dxfer_len - nb;

	sgp->masked_status = sgp->status & STATUS_MASK;
	sgp->msg_status = 0;
	sgp->host_status = 0;
	sgp->driver_status = 0;
	sgp->resid = resid;
	if (sgp->masked_status || sgp->host_status || sgp->driver_status)
		sgp->info |= SG_INFO_CHECK;

	pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 sgp->status, sgp->masked_status, sgp->resid);

	if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
		if (sgp->mx_sb_len > 0) {
			struct fit_comp_error_info *ei = &skspcl->req.err_info;
			u32 nbytes = sizeof(*ei);

			nbytes = min_t(u32, nbytes, sgp->mx_sb_len);

			sgp->sb_len_wr = nbytes;

			if (__copy_to_user(sgp->sbp, ei, nbytes)) {
				pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
					 skdev->name, __func__, __LINE__,
					 sgp->sbp);
				return -EFAULT;
			}
		}
	}

	if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
		pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
			 skdev->name, __func__, __LINE__, sksgio->argp);
		return -EFAULT;
	}

	return 0;
}
static int skd_sg_io_release_skspcl(struct skd_device *skdev,
				    struct skd_sg_io *sksgio)
{
	struct skd_special_context *skspcl = sksgio->skspcl;

	if (skspcl != NULL) {
		ulong flags;

		sksgio->skspcl = NULL;

		spin_lock_irqsave(&skdev->lock, flags);
		skd_release_special(skdev, skspcl);
		spin_unlock_irqrestore(&skdev->lock, flags);
	}

	return 0;
}
/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */
static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}
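/*
 * Internal commands are chained from the completion path: TEST UNIT
 * READY leads to a WRITE BUFFER/READ BUFFER pattern check, then READ
 * CAPACITY and INQUIRY (serial number page); only after a clean pass
 * is the device brought online. See skd_complete_internal().
 */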
#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;

	}
	skd_send_special_fitmsg(skdev, skspcl);
}
static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}
static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}
static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
		       "ascq/fruc %02x/%02x/%02x/%02x\n",
		       skd_name(skdev), key, code, qual, fruc);
	}
}
static void skd_complete_internal(struct skd_device *skdev,
				  volatile struct fit_completion_entry_v1
				  *skcomp,
				  volatile struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi =
		(struct skd_scsi_request *)&skspcl->msg_buf[64];

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	pr_debug("%s:%s:%d complete internal %x\n",
		 skdev->name, __func__, __LINE__, scsi->cdb[0]);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
					 skdev->name, __func__, __LINE__,
					 skdev->state);
				return;
			}
			pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
				 skdev->name, __func__, __LINE__);
			skd_send_internal_skspcl(skdev, skspcl, 0x00);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
					 skdev->name, __func__, __LINE__,
					 skdev->state);
				return;
			}
			pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
				 skdev->name, __func__, __LINE__);
			skd_send_internal_skspcl(skdev, skspcl, 0x00);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				pr_err(
				       "(%s):*** W/R Buffer mismatch %d ***\n",
				       skd_name(skdev),
				       skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					pr_err(
					       "(%s): W/R Buffer Connect Error\n",
					       skd_name(skdev));
					return;
				}
			}

		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				pr_debug("%s:%s:%d "
					 "read buffer failed, don't send anymore state 0x%x\n",
					 skdev->name, __func__, __LINE__,
					 skdev->state);
				return;
			}
			pr_debug("%s:%s:%d "
				 "**** read buffer failed, retry skerr\n",
				 skdev->name, __func__, __LINE__);
			skd_send_internal_skspcl(skdev, skspcl, 0x00);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];

			pr_debug("%s:%s:%d last lba %d, bs %d\n",
				 skdev->name, __func__, __LINE__,
				 skdev->read_cap_last_lba,
				 skdev->read_cap_blocksize);

			set_capacity(skdev->disk,
				     skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk,
				     skdev->read_cap_last_lba + 1);
			pr_debug("%s:%s:%d "
				 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
				 skdev->name, __func__, __LINE__);
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
				 skdev->name, __func__, __LINE__);
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
				 skdev->name, __func__, __LINE__);
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}
/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */
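/*
 * A FIT message is queued by writing its DMA address to FIT_Q_COMMAND
 * with low bits encoding the queue id and the message size class
 * (64/128/256/512 bytes), so the doorbell write itself tells the
 * device how much to fetch.
 */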
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
{
	u64 qcmd;
	struct fit_msg_hdr *fmh;

	pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
		 skdev->name, __func__, __LINE__,
		 skmsg->mb_dma_address, skdev->in_flight);
	pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
		 skdev->name, __func__, __LINE__,
		 skmsg->msg_buf, skmsg->offset);

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
	skmsg->outstanding = fmh->num_protocol_cmds_coalesced;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;
		int i;

		for (i = 0; i < skmsg->length; i += 8) {
			pr_debug("%s:%s:%d msg[%2d] %8ph\n",
				 skdev->name, __func__, __LINE__, i, &bp[i]);
			if (i == 0)
				i = 64 - 8;
		}
	}

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)
{
	u64 qcmd;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			pr_debug("%s:%s:%d  spcl[%2d] %8ph\n",
				 skdev->name, __func__, __LINE__, i, &bp[i]);
			if (i == 0)
				i = 64 - 8;
		}

		pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
			 skdev->name, __func__, __LINE__,
			 skspcl, skspcl->req.id, skspcl->req.sksg_list,
			 skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
				 "addr=0x%llx next=0x%llx\n",
				 skdev->name, __func__, __LINE__,
				 i, sgd->byte_count, sgd->control,
				 sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */
static void skd_complete_other(struct skd_device *skdev,
			       volatile struct fit_completion_entry_v1 *skcomp,
			       volatile struct fit_comp_error_info *skerr);
2225 enum skd_check_status_action action
;
2228 static struct sns_info skd_chkstat_table
[] = {
2230 { 0x70, 0x02, RECOVERED_ERROR
, 0, 0, 0x1c,
2231 SKD_CHECK_STATUS_REPORT_GOOD
},
2234 { 0x70, 0x02, NO_SENSE
, 0x0B, 0x00, 0x1E, /* warnings */
2235 SKD_CHECK_STATUS_REPORT_SMART_ALERT
},
2236 { 0x70, 0x02, NO_SENSE
, 0x5D, 0x00, 0x1E, /* thresholds */
2237 SKD_CHECK_STATUS_REPORT_SMART_ALERT
},
2238 { 0x70, 0x02, RECOVERED_ERROR
, 0x0B, 0x01, 0x1F, /* temperature over trigger */
2239 SKD_CHECK_STATUS_REPORT_SMART_ALERT
},
2241 /* Retry (with limits) */
2242 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
2243 SKD_CHECK_STATUS_REQUEUE_REQUEST
},
2244 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
2245 SKD_CHECK_STATUS_REQUEUE_REQUEST
},
2246 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
2247 SKD_CHECK_STATUS_REQUEUE_REQUEST
},
2248 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
2249 SKD_CHECK_STATUS_REQUEUE_REQUEST
},
2251 /* Busy (or about to be) */
2252 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
2253 SKD_CHECK_STATUS_BUSY_IMMINENT
},
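/*
 * Worked example for the table above: the first entry carries mask
 * 0x1c, so only type (0x10), stat (0x08) and key (0x04) must match
 * while asc and ascq are ignored. Any completion with type 0x70,
 * status 0x02 and sense key RECOVERED_ERROR therefore resolves to
 * SKD_CHECK_STATUS_REPORT_GOOD regardless of the asc/ascq reported.
 */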
/* Look up status and sense data to decide how to handle the error
 * as returned by the device.
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
{
	int i, n;

	pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
	       skd_name(skdev), skerr->key, skerr->code, skerr->qual,
	       skerr->fruc);

	pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		 skdev->name, __func__, __LINE__, skerr->type, cmp_status,
		 skerr->key, skerr->code, skerr->qual, skerr->fruc);

	/* Does the info match an entry in the good category? */
	n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
	for (i = 0; i < n; i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			pr_err("(%s): SMART Alert: sense key/asc/ascq "
			       "%02x/%02x/%02x\n",
			       skd_name(skdev), skerr->key,
			       skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		pr_debug("%s:%s:%d status check: error\n",
			 skdev->name, __func__, __LINE__);
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	pr_debug("%s:%s:%d status check good default\n",
		 skdev->name, __func__, __LINE__);
	return SKD_CHECK_STATUS_REPORT_GOOD;
}
static void skd_resolve_req_exception(struct skd_device *skdev,
				      struct skd_request_context *skreq)
{
	u8 cmp_status = skreq->completion.status;

	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
	case SKD_CHECK_STATUS_REPORT_GOOD:
	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
		skd_end_request(skdev, skreq, BLK_STS_OK);
		break;

	case SKD_CHECK_STATUS_BUSY_IMMINENT:
		skd_log_skreq(skdev, skreq, "retry(busy)");
		blk_requeue_request(skdev->queue, skreq->req);
		pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
		skdev->timer_countdown = SKD_TIMER_MINUTES(20);
		skd_quiesce_dev(skdev);
		break;

	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
		if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
			skd_log_skreq(skdev, skreq, "retry");
			blk_requeue_request(skdev->queue, skreq->req);
			break;
		}
		/* fall through to report error */

	case SKD_CHECK_STATUS_REPORT_ERROR:
	default:
		skd_end_request(skdev, skreq, BLK_STS_IOERR);
		break;
	}
}
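/*
 * The retry budget for SKD_CHECK_STATUS_REQUEUE_REQUEST lives in the
 * otherwise unused req->special field of the block-layer request: it
 * is pre-incremented on every requeue, so once it reaches
 * SKD_MAX_RETRIES the request is completed with BLK_STS_IOERR instead
 * of being requeued again.
 */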
/* assume spinlock is already held */
static void skd_release_skreq(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	u32 msg_slot;
	struct skd_fitmsg_context *skmsg;
	u32 timo_slot;

	/*
	 * Reclaim the FIT msg buffer if this is
	 * the first of the requests it carried to
	 * be completed. The FIT msg buffer used to
	 * send this request cannot be reused until
	 * we are sure the s1120 card has copied
	 * it to its memory. The FIT msg might have
	 * contained several requests. As soon as
	 * any of them are completed we know that
	 * the entire FIT msg was transferred.
	 * Only the first completed request will
	 * match the FIT msg buffer id. The FIT
	 * msg buffer id is immediately updated.
	 * When subsequent requests complete the FIT
	 * msg buffer id won't match, so we know
	 * quite cheaply that it is already done.
	 */
	msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
	SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);

	skmsg = &skdev->skmsg_table[msg_slot];
	if (skmsg->id == skreq->fitmsg_id) {
		SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
		SKD_ASSERT(skmsg->outstanding > 0);
		skmsg->outstanding--;
		if (skmsg->outstanding == 0) {
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
			skmsg->next = skdev->skmsg_free_list;
			skdev->skmsg_free_list = skmsg;
		}
	}

	/*
	 * Decrease the number of active requests.
	 * Also decrements the count in the timeout slot.
	 */
	SKD_ASSERT(skdev->in_flight > 0);
	skdev->in_flight -= 1;

	timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
	SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
	skdev->timeout_slot[timo_slot] -= 1;

	/*
	 * Reclaim the skd_request_context
	 */
	skreq->state = SKD_REQ_STATE_IDLE;
	skreq->id += SKD_ID_INCR;
	skreq->next = skdev->skreq_free_list;
	skdev->skreq_free_list = skreq;
}
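/*
 * Advancing the id by SKD_ID_INCR on each release changes the
 * uniquifier bits above the slot/table field while leaving the slot
 * bits intact, e.g. 0x0012 -> 0x0412 -> 0x0812. A completion that
 * arrives late with the old id then fails the skreq->id == req_id
 * check in skd_isr_completion_posted() and is treated as stale.
 */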
#define DRIVER_INQ_EVPD_PAGE_CODE   0xDA

static void skd_do_inq_page_00(struct skd_device *skdev,
			       volatile struct fit_completion_entry_v1 *skcomp,
			       volatile struct fit_comp_error_info *skerr,
			       uint8_t *cdb, uint8_t *buf)
{
	uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;

	/* Caller requested "supported pages".  The driver needs to insert
	 * its page.
	 */
	pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
		 skdev->name, __func__, __LINE__);

	/* If the device rejected the request because the CDB was
	 * improperly formed, then just leave.
	 */
	if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
	    skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
		return;

	/* Get the amount of space the caller allocated */
	max_bytes = (cdb[3] << 8) | cdb[4];

	/* Get the number of pages actually returned by the device */
	drive_pages = (buf[2] << 8) | buf[3];
	drive_bytes = drive_pages + 4;
	new_size = drive_pages + 1;

	/* Supported pages must be in numerical order, so find where
	 * the driver page needs to be inserted into the list of
	 * pages returned by the device.
	 */
	for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
		if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
			return; /* Device using this page code. abort */
		else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
			break;
	}

	if (insert_pt < max_bytes) {
		uint16_t u;

		/* Shift everything up one byte to make room. */
		for (u = new_size + 3; u > insert_pt; u--)
			buf[u] = buf[u - 1];
		buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;

		/* SCSI byte order increment of num_returned_bytes by 1 */
		skcomp->num_returned_bytes =
			be32_to_cpu(skcomp->num_returned_bytes) + 1;
		skcomp->num_returned_bytes =
			be32_to_cpu(skcomp->num_returned_bytes);
	}

	/* update page length field to reflect the driver's page too */
	buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
	buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
}
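/*
 * Example of the insertion above: if the drive reported supported
 * pages { 0x00, 0x80, 0x83 }, every code scanned is below 0xDA, so
 * insert_pt ends up past the list and 0xDA is appended, yielding
 * { 0x00, 0x80, 0x83, 0xDA } with the page length raised from 3 to 4.
 */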
static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
{
	int pcie_reg;
	u16 pci_bus_speed;
	u8 pci_lanes;

	pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pcie_reg) {
		u16 linksta;

		pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);

		pci_bus_speed = linksta & 0xF;
		pci_lanes = (linksta & 0x3F0) >> 4;
	} else {
		*speed = STEC_LINK_UNKNOWN;
		*width = 0xFF;
		return;
	}

	switch (pci_bus_speed) {
	case 1:
		*speed = STEC_LINK_2_5GTS;
		break;
	case 2:
		*speed = STEC_LINK_5GTS;
		break;
	case 3:
		*speed = STEC_LINK_8GTS;
		break;
	default:
		*speed = STEC_LINK_UNKNOWN;
		break;
	}

	if (pci_lanes <= 0x20)
		*width = pci_lanes;
	else
		*width = 0xFF;
}
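/*
 * The decode above matches the PCIe Link Status register layout:
 * bits 3:0 carry the current link speed (1 = 2.5 GT/s, 2 = 5 GT/s,
 * 3 = 8 GT/s) and bits 9:4 the negotiated link width, which is why
 * any width above 0x20 (x32) is reported as unknown.
 */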
static void skd_do_inq_page_da(struct skd_device *skdev,
			       volatile struct fit_completion_entry_v1 *skcomp,
			       volatile struct fit_comp_error_info *skerr,
			       uint8_t *cdb, uint8_t *buf)
{
	struct pci_dev *pdev = skdev->pdev;
	unsigned max_bytes;
	struct driver_inquiry_data inq;
	u16 val;

	pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
		 skdev->name, __func__, __LINE__);

	memset(&inq, 0, sizeof(inq));

	inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;

	skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
	inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
	inq.pcie_device_number = PCI_SLOT(pdev->devfn);
	inq.pcie_function_number = PCI_FUNC(pdev->devfn);

	pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
	inq.pcie_vendor_id = cpu_to_be16(val);

	pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
	inq.pcie_device_id = cpu_to_be16(val);

	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
	inq.pcie_subsystem_vendor_id = cpu_to_be16(val);

	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
	inq.pcie_subsystem_device_id = cpu_to_be16(val);

	/* Driver version, fixed length, padded with spaces on the right */
	inq.driver_version_length = sizeof(inq.driver_version);
	memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
	memcpy(inq.driver_version, DRV_VER_COMPL,
	       min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));

	inq.page_length = cpu_to_be16((sizeof(inq) - 4));

	/* Clear the error set by the device */
	skcomp->status = SAM_STAT_GOOD;
	memset((void *)skerr, 0, sizeof(*skerr));

	/* copy response into output buffer */
	max_bytes = (cdb[3] << 8) | cdb[4];
	memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));

	skcomp->num_returned_bytes =
		be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
}
static void skd_do_driver_inq(struct skd_device *skdev,
			      volatile struct fit_completion_entry_v1 *skcomp,
			      volatile struct fit_comp_error_info *skerr,
			      uint8_t *cdb, uint8_t *buf)
{
	if (!buf)
		return;
	else if (cdb[0] != INQUIRY)
		return;		/* Not an INQUIRY */
	else if ((cdb[1] & 1) == 0)
		return;		/* EVPD not set */
	else if (cdb[2] == 0)
		/* Need to add driver's page to supported pages list */
		skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
	else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
		/* Caller requested driver's page */
		skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
}
static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
{
	if (!sg)
		return NULL;
	if (!sg_page(sg))
		return NULL;

	return sg_virt(sg);
}
static void skd_process_scsi_inq(struct skd_device *skdev,
				 volatile struct fit_completion_entry_v1 *skcomp,
				 volatile struct fit_comp_error_info *skerr,
				 struct skd_special_context *skspcl)
{
	uint8_t *buf;
	struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
	struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];

	dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
			    skspcl->req.sg_data_dir);
	buf = skd_sg_1st_page_ptr(skspcl->req.sg);

	if (buf)
		skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
}
static void skd_complete_special(struct skd_device *skdev,
				 volatile struct fit_completion_entry_v1 *skcomp,
				 volatile struct fit_comp_error_info *skerr,
				 struct skd_special_context *skspcl);

static int skd_isr_completion_posted(struct skd_device *skdev,
					int limit, int *enqueued)
{
	volatile struct fit_completion_entry_v1 *skcmp = NULL;
	volatile struct fit_comp_error_info *skerr;
	u16 req_id;
	u32 req_slot;
	struct skd_request_context *skreq;
	u16 cmp_cntxt = 0;
	u8 cmp_status = 0;
	u8 cmp_cycle = 0;
	u32 cmp_bytes = 0;
	int rc = 0;
	int processed = 0;

	for (;; ) {
		SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);

		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
		cmp_cycle = skcmp->cycle;
		cmp_cntxt = skcmp->tag;
		cmp_status = skcmp->status;
		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);

		skerr = &skdev->skerr_table[skdev->skcomp_ix];

		pr_debug("%s:%s:%d "
			 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
			 "busy=%d rbytes=0x%x proto=%d\n",
			 skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
			 skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
			 skdev->in_flight, cmp_bytes, skdev->proto_ver);

		if (cmp_cycle != skdev->skcomp_cycle) {
			pr_debug("%s:%s:%d end of completions\n",
				 skdev->name, __func__, __LINE__);
			break;
		}
		/*
		 * Update the completion queue head index and possibly
		 * the completion cycle count. 8-bit wrap-around.
		 */
		skdev->skcomp_ix++;
		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
			skdev->skcomp_ix = 0;
			skdev->skcomp_cycle++;
		}

		/*
		 * The command context is a unique 32-bit ID. The low order
		 * bits help locate the request. The request is usually a
		 * r/w request (see skd_start() above) or a special request.
		 */
		req_id = cmp_cntxt;
		req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;

		/* Is this other than a r/w request? */
		if (req_slot >= skdev->num_req_context) {
			/*
			 * This is not a completion for a r/w request.
			 */
			skd_complete_other(skdev, skcmp, skerr);
			continue;
		}

		skreq = &skdev->skreq_table[req_slot];

		/*
		 * Make sure the request ID for the slot matches.
		 */
		if (skreq->id != req_id) {
			pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
				 skdev->name, __func__, __LINE__,
				 req_id, skreq->id);
			{
				u16 new_id = cmp_cntxt;

				pr_err("(%s): Completion mismatch "
				       "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
				       skd_name(skdev), req_id,
				       skreq->id, new_id);

				continue;
			}
		}

		SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);

		if (skreq->state == SKD_REQ_STATE_ABORTED) {
			pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
				 skdev->name, __func__, __LINE__,
				 skreq, skreq->id);
			/* a previously timed out command can
			 * now be cleaned up */
			skd_release_skreq(skdev, skreq);
			continue;
		}

		skreq->completion = *skcmp;
		if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
			skreq->err_info = *skerr;
			skd_log_check_status(skdev, cmp_status, skerr->key,
					     skerr->code, skerr->qual,
					     skerr->fruc);
		}
		/* Release DMA resources for the request. */
		if (skreq->n_sg > 0)
			skd_postop_sg_list(skdev, skreq);

		if (!skreq->req) {
			pr_debug("%s:%s:%d NULL backptr skdreq %p, "
				 "req=0x%x req_id=0x%x\n",
				 skdev->name, __func__, __LINE__,
				 skreq, skreq->id, req_id);
		} else {
			/*
			 * Capture the outcome and post it back to the
			 * native request.
			 */
			if (likely(cmp_status == SAM_STAT_GOOD))
				skd_end_request(skdev, skreq, BLK_STS_OK);
			else
				skd_resolve_req_exception(skdev, skreq);
		}

		/*
		 * Release the skreq, its FIT msg (if one), timeout slot,
		 * and queue depth.
		 */
		skd_release_skreq(skdev, skreq);

		/* skd_isr_comp_limit equal zero means no limit */
		if (limit) {
			if (++processed >= limit) {
				rc = 1;
				break;
			}
		}
	}

	if ((skdev->state == SKD_DRVR_STATE_PAUSING)
		&& (skdev->in_flight) == 0) {
		skdev->state = SKD_DRVR_STATE_PAUSED;
		wake_up_interruptible(&skdev->waitq);
	}

	return rc;
}
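/*
 * The cycle field keeps producer and consumer in step without a shared
 * head pointer: the device stamps every entry it writes with the
 * current pass's cycle value, and the driver stops draining as soon as
 * an entry's cycle differs from skcomp_cycle. Since skcomp_cycle only
 * advances when skcomp_ix wraps past SKD_N_COMPLETION_ENTRY, an entry
 * left over from the previous pass can never look fresh.
 */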
static void skd_complete_other(struct skd_device *skdev,
			       volatile struct fit_completion_entry_v1 *skcomp,
			       volatile struct fit_comp_error_info *skerr)
{
	u32 req_id = 0;
	u32 req_table;
	u32 req_slot;
	struct skd_special_context *skspcl;

	req_id = skcomp->tag;
	req_table = req_id & SKD_ID_TABLE_MASK;
	req_slot = req_id & SKD_ID_SLOT_MASK;

	pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
		 skdev->name, __func__, __LINE__,
		 req_table, req_id, req_slot);

	/*
	 * Based on the request id, determine how to dispatch this completion.
	 * This switch/case is finding the good cases and forwarding the
	 * completion entry. Errors are reported below the switch.
	 */
	switch (req_table) {
	case SKD_ID_RW_REQUEST:
		/*
		 * The caller, skd_completion_posted_isr() above,
		 * handles r/w requests. The only way we get here
		 * is if the req_slot is out of bounds.
		 */
		break;

	case SKD_ID_SPECIAL_REQUEST:
		/*
		 * Make sure the req_slot is in bounds and that the id
		 * matches.
		 */
		if (req_slot < skdev->n_special) {
			skspcl = &skdev->skspcl_table[req_slot];
			if (skspcl->req.id == req_id &&
			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
				skd_complete_special(skdev,
						     skcomp, skerr, skspcl);
				return;
			}
		}
		break;

	case SKD_ID_INTERNAL:
		if (req_slot == 0) {
			skspcl = &skdev->internal_skspcl;
			if (skspcl->req.id == req_id &&
			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
				skd_complete_internal(skdev,
						      skcomp, skerr, skspcl);
				return;
			}
		}
		break;

	case SKD_ID_FIT_MSG:
		/*
		 * These id's should never appear in a completion record.
		 */
		break;

	default:
		/*
		 * These id's should never appear anywhere;
		 */
		break;
	}

	/*
	 * If we get here it is a bad or stale id.
	 */
}
static void skd_complete_special(struct skd_device *skdev,
				 volatile struct fit_completion_entry_v1 *skcomp,
				 volatile struct fit_comp_error_info *skerr,
				 struct skd_special_context *skspcl)
{
	pr_debug("%s:%s:%d completing special request %p\n",
		 skdev->name, __func__, __LINE__, skspcl);
	if (skspcl->orphaned) {
		/* Discard orphaned request */
		/* ?: Can this release directly or does it need
		 * to use a worker? */
		pr_debug("%s:%s:%d release orphaned %p\n",
			 skdev->name, __func__, __LINE__, skspcl);
		skd_release_special(skdev, skspcl);
		return;
	}

	skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);

	skspcl->req.state = SKD_REQ_STATE_COMPLETED;
	skspcl->req.completion = *skcomp;
	skspcl->req.err_info = *skerr;

	skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
			     skerr->code, skerr->qual, skerr->fruc);

	wake_up_interruptible(&skdev->waitq);
}
/* assume spinlock is already held */
static void skd_release_special(struct skd_device *skdev,
				struct skd_special_context *skspcl)
{
	int i, was_depleted;

	for (i = 0; i < skspcl->req.n_sg; i++) {
		struct page *page = sg_page(&skspcl->req.sg[i]);

		__free_page(page);
	}

	was_depleted = (skdev->skspcl_free_list == NULL);

	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;
	skspcl->req.next =
		(struct skd_request_context *)skdev->skspcl_free_list;
	skdev->skspcl_free_list = (struct skd_special_context *)skspcl;

	if (was_depleted) {
		pr_debug("%s:%s:%d skspcl was depleted\n",
			 skdev->name, __func__, __LINE__);
		/* Free list was depleted. There might be waiters. */
		wake_up_interruptible(&skdev->waitq);
	}
}
static void skd_reset_skcomp(struct skd_device *skdev)
{
	u32 nbytes;
	struct fit_completion_entry_v1 *skcomp;

	nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
	nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;

	memset(skdev->skcomp_table, 0, nbytes);

	skdev->skcomp_ix = 0;
	skdev->skcomp_cycle = 1;
}
/*
 *****************************************************************************
 * INTERRUPTS
 *****************************************************************************
 */
static void skd_completion_worker(struct work_struct *work)
{
	struct skd_device *skdev =
		container_of(work, struct skd_device, completion_worker);
	unsigned long flags;
	int flush_enqueued = 0;

	spin_lock_irqsave(&skdev->lock, flags);

	/*
	 * pass in limit=0, which means no limit..
	 * process everything in compq
	 */
	skd_isr_completion_posted(skdev, 0, &flush_enqueued);
	skd_request_fn(skdev->queue);

	spin_unlock_irqrestore(&skdev->lock, flags);
}
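/*
 * This worker is the bottom half for completions deferred by skd_isr()
 * and skd_comp_q(): the hard irq handlers drain at most
 * skd_isr_comp_limit entries under the lock and then schedule this
 * work item, which re-runs the completion loop with limit=0, i.e.
 * unbounded.
 */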
static void skd_isr_msg_from_dev(struct skd_device *skdev);

static irqreturn_t
skd_isr(int irq, void *ptr)
{
	struct skd_device *skdev;
	u32 intstat;
	u32 ack;
	int rc = 0;
	int deferred = 0;
	int flush_enqueued = 0;

	skdev = (struct skd_device *)ptr;
	spin_lock(&skdev->lock);

	for (;; ) {
		intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);

		ack = FIT_INT_DEF_MASK;
		ack &= intstat;

		pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
			 skdev->name, __func__, __LINE__, intstat, ack);

		/* As long as there is an int pending on device, keep
		 * running loop. When none, get out, but if we've never
		 * done any processing, call completion handler?
		 */
		if (ack == 0) {
			/* No interrupts on device, but run the completion
			 * processor anyway?
			 */
			if (rc == 0)
				if (likely (skdev->state
					== SKD_DRVR_STATE_ONLINE))
					deferred = 1;
			break;
		}

		rc = IRQ_HANDLED;

		SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);

		if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
			   (skdev->state != SKD_DRVR_STATE_STOPPING))) {
			if (intstat & FIT_ISH_COMPLETION_POSTED) {
				/*
				 * If we have already deferred completion
				 * processing, don't bother running it again
				 */
				if (deferred == 0)
					deferred =
						skd_isr_completion_posted(skdev,
						skd_isr_comp_limit, &flush_enqueued);
			}

			if (intstat & FIT_ISH_FW_STATE_CHANGE) {
				skd_isr_fwstate(skdev);
				if (skdev->state == SKD_DRVR_STATE_FAULT ||
				    skdev->state ==
				    SKD_DRVR_STATE_DISAPPEARED) {
					spin_unlock(&skdev->lock);
					return rc;
				}
			}

			if (intstat & FIT_ISH_MSG_FROM_DEV)
				skd_isr_msg_from_dev(skdev);
		}
	}

	if (unlikely(flush_enqueued))
		skd_request_fn(skdev->queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		skd_request_fn(skdev->queue);

	spin_unlock(&skdev->lock);

	return rc;
}
static void skd_drive_fault(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_FAULT;
	pr_err("(%s): Drive FAULT\n", skd_name(skdev));
}

static void skd_drive_disappeared(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_DISAPPEARED;
	pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
}
static void skd_isr_fwstate(struct skd_device *skdev)
{
	u32 sense;
	u32 state;
	u32 mtd;
	int prev_driver_state = skdev->state;

	sense = SKD_READL(skdev, FIT_STATUS);
	state = sense & FIT_SR_DRIVE_STATE_MASK;

	pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
	       skd_name(skdev),
	       skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
	       skd_drive_state_to_str(state), state);

	skdev->drive_state = state;

	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_INIT:
		if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
			skd_disable_interrupts(skdev);
			break;
		}
		if (skdev->state == SKD_DRVR_STATE_RESTARTING)
			skd_recover_requests(skdev, 0);
		if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
			skdev->timer_countdown = SKD_STARTING_TIMO;
			skdev->state = SKD_DRVR_STATE_STARTING;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_SR_DRIVE_ONLINE:
		skdev->cur_max_queue_depth = skd_max_queue_depth;
		if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
			skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;

		skdev->queue_low_water_mark =
			skdev->cur_max_queue_depth * 2 / 3 + 1;
		if (skdev->queue_low_water_mark < 1)
			skdev->queue_low_water_mark = 1;
		pr_info("(%s): Queue depth limit=%d dev=%d lowat=%d\n",
			skd_name(skdev),
			skdev->cur_max_queue_depth,
			skdev->dev_max_queue_depth, skdev->queue_low_water_mark);

		skd_refresh_device_data(skdev);
		break;

	case FIT_SR_DRIVE_BUSY:
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		skd_quiesce_dev(skdev);
		break;
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		/* set timer for 3 seconds, we'll abort any unfinished
		 * commands after that expires
		 */
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_TIMER_SECONDS(3);
		blk_start_queue(skdev->queue);
		break;
	case FIT_SR_DRIVE_BUSY_ERASE:
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		break;
	case FIT_SR_DRIVE_OFFLINE:
		skdev->state = SKD_DRVR_STATE_IDLE;
		break;
	case FIT_SR_DRIVE_SOFT_RESET:
		switch (skdev->state) {
		case SKD_DRVR_STATE_STARTING:
		case SKD_DRVR_STATE_RESTARTING:
			/* Expected by a caller of skd_soft_reset() */
			break;
		default:
			skdev->state = SKD_DRVR_STATE_RESTARTING;
			break;
		}
		break;
	case FIT_SR_DRIVE_FW_BOOTING:
		pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
		break;

	case FIT_SR_DRIVE_DEGRADED:
	case FIT_SR_PCIE_LINK_DOWN:
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		break;

	case FIT_SR_DRIVE_FAULT:
		skd_drive_fault(skdev);
		skd_recover_requests(skdev, 0);
		blk_start_queue(skdev->queue);
		break;

	/* PCIe bus returned all Fs? */
	case 0xFF:
		pr_info("(%s): state=0x%x sense=0x%x\n",
			skd_name(skdev), state, sense);
		skd_drive_disappeared(skdev);
		skd_recover_requests(skdev, 0);
		blk_start_queue(skdev->queue);
		break;
	default:
		/*
		 * Unknown FW State. Wait for a state we recognize.
		 */
		break;
	}
	pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
	       skd_name(skdev),
	       skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
	       skd_skdev_state_to_str(skdev->state), skdev->state);
}
static void skd_recover_requests(struct skd_device *skdev, int requeue)
{
	int i;

	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq = &skdev->skreq_table[i];

		if (skreq->state == SKD_REQ_STATE_BUSY) {
			skd_log_skreq(skdev, skreq, "recover");

			SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
			SKD_ASSERT(skreq->req != NULL);

			/* Release DMA resources for the request. */
			if (skreq->n_sg > 0)
				skd_postop_sg_list(skdev, skreq);

			if (requeue &&
			    (unsigned long) ++skreq->req->special <
			    SKD_MAX_RETRIES)
				blk_requeue_request(skdev->queue, skreq->req);
			else
				skd_end_request(skdev, skreq, BLK_STS_IOERR);

			skreq->req = NULL;

			skreq->state = SKD_REQ_STATE_IDLE;
			skreq->id += SKD_ID_INCR;
		}
		if (i > 0)
			skreq[-1].next = skreq;
		skreq->next = NULL;
	}
	skdev->skreq_free_list = skdev->skreq_table;

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];

		if (skmsg->state == SKD_MSG_STATE_BUSY) {
			skd_log_skmsg(skdev, skmsg, "salvaged");
			SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
		}
		if (i > 0)
			skmsg[-1].next = skmsg;
		skmsg->next = NULL;
	}
	skdev->skmsg_free_list = skdev->skmsg_table;

	for (i = 0; i < skdev->n_special; i++) {
		struct skd_special_context *skspcl = &skdev->skspcl_table[i];

		/* If orphaned, reclaim it because it has already been reported
		 * to the process as an error (it was just waiting for
		 * a completion that didn't come, and now it will never come)
		 * If busy, change to a state that will cause it to error
		 * out in the wait routine and let it do the normal
		 * reporting and reclaiming
		 */
		if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
			if (skspcl->orphaned) {
				pr_debug("%s:%s:%d orphaned %p\n",
					 skdev->name, __func__, __LINE__,
					 skspcl);
				skd_release_special(skdev, skspcl);
			} else {
				pr_debug("%s:%s:%d not orphaned %p\n",
					 skdev->name, __func__, __LINE__,
					 skspcl);
				skspcl->req.state = SKD_REQ_STATE_ABORTED;
			}
		}
	}
	skdev->skspcl_free_list = skdev->skspcl_table;

	for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
		skdev->timeout_slot[i] = 0;

	skdev->in_flight = 0;
}
static void skd_isr_msg_from_dev(struct skd_device *skdev)
{
	u32 mfd;
	u32 mtd;
	u32 data;

	mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);

	pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
		 skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);

	/* ignore any mtd that is an ack for something we didn't send */
	if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
		return;

	switch (FIT_MXD_TYPE(mfd)) {
	case FIT_MTD_FITFW_INIT:
		skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);

		if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
			pr_err("(%s): protocol mismatch\n",
			       skdev->name);
			pr_err("(%s): got=%d support=%d\n",
			       skdev->name, skdev->proto_ver,
			       FIT_PROTOCOL_VERSION_1);
			pr_err("(%s): please upgrade driver\n",
			       skdev->name);
			skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_GET_CMDQ_DEPTH:
		skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
				   SKD_N_COMPLETION_ENTRY);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_DEPTH:
		SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_ADDR:
		skd_reset_skcomp(skdev);
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_HOST_ID:
		skdev->connect_time_stamp = get_seconds();
		data = skdev->connect_time_stamp & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
		skdev->drive_jiffies = FIT_MXD_DATA(mfd);
		data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
		skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
		mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;

		pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
		       skd_name(skdev),
		       skdev->connect_time_stamp, skdev->drive_jiffies);
		break;

	case FIT_MTD_ARM_QUEUE:
		skdev->last_mtd = 0;
		/*
		 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
		 */
		break;

	default:
		break;
	}
}
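/*
 * Taken together, the cases above form the boot-time handshake, with
 * each mtd written to FIT_MSG_TO_DEVICE prompting the device's next
 * reply: FITFW_INIT (protocol check) -> GET_CMDQ_DEPTH ->
 * SET_COMPQ_DEPTH -> SET_COMPQ_ADDR -> CMD_LOG_HOST_ID ->
 * CMD_LOG_TIME_STAMP_LO -> CMD_LOG_TIME_STAMP_HI -> ARM_QUEUE, after
 * which the drive is expected to report FIT_SR_DRIVE_ONLINE.
 */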
static void skd_disable_interrupts(struct skd_device *skdev)
{
	u32 sense;

	sense = SKD_READL(skdev, FIT_CONTROL);
	sense &= ~FIT_CR_ENABLE_INTERRUPTS;
	SKD_WRITEL(skdev, sense, FIT_CONTROL);
	pr_debug("%s:%s:%d sense 0x%x\n",
		 skdev->name, __func__, __LINE__, sense);

	/* Note that the 1s is written. A 1-bit means
	 * disable, a 0 means enable.
	 */
	SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
}
static void skd_enable_interrupts(struct skd_device *skdev)
{
	u32 val;

	/* unmask interrupts first */
	val = FIT_ISH_FW_STATE_CHANGE +
	      FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;

	/* Note that the complement of mask is written. A 1-bit means
	 * disable, a 0 means enable. */
	SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
	pr_debug("%s:%s:%d interrupt mask=0x%x\n",
		 skdev->name, __func__, __LINE__, ~val);

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= FIT_CR_ENABLE_INTERRUPTS;
	pr_debug("%s:%s:%d control=0x%x\n",
		 skdev->name, __func__, __LINE__, val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}
/*
 *****************************************************************************
 * START, STOP, RESTART, QUIESCE, UNQUIESCE
 *****************************************************************************
 */

static void skd_soft_reset(struct skd_device *skdev)
{
	u32 val;

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= (FIT_CR_SOFT_RESET);
	pr_debug("%s:%s:%d control=0x%x\n",
		 skdev->name, __func__, __LINE__, val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}
static void skd_start_device(struct skd_device *skdev)
{
	unsigned long flags;
	u32 sense;
	u32 state;

	spin_lock_irqsave(&skdev->lock, flags);

	/* ack all ghost interrupts */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

	sense = SKD_READL(skdev, FIT_STATUS);

	pr_debug("%s:%s:%d initial status=0x%x\n",
		 skdev->name, __func__, __LINE__, sense);

	state = sense & FIT_SR_DRIVE_STATE_MASK;
	skdev->drive_state = state;
	skdev->last_mtd = 0;

	skdev->state = SKD_DRVR_STATE_STARTING;
	skdev->timer_countdown = SKD_STARTING_TIMO;

	skd_enable_interrupts(skdev);

	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_OFFLINE:
		pr_err("(%s): Drive offline...\n", skd_name(skdev));
		break;

	case FIT_SR_DRIVE_FW_BOOTING:
		pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
		break;

	case FIT_SR_DRIVE_BUSY_SANITIZE:
		pr_info("(%s): Start: BUSY_SANITIZE\n",
			skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_BUSY_ERASE:
		pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_INIT:
	case FIT_SR_DRIVE_ONLINE:
		skd_soft_reset(skdev);
		break;

	case FIT_SR_DRIVE_BUSY:
		pr_err("(%s): Drive Busy...\n", skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_SOFT_RESET:
		pr_err("(%s) drive soft reset in prog\n",
		       skd_name(skdev));
		break;

	case FIT_SR_DRIVE_FAULT:
		/* Fault state is bad...soft reset won't do it...
		 * Hard reset, maybe, but does it work on device?
		 * For now, just fault so the system doesn't hang.
		 */
		skd_drive_fault(skdev);
		/*start the queue so we can respond with error to requests */
		pr_debug("%s:%s:%d starting %s queue\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case 0xFF:
		/* Most likely the device isn't there or isn't responding
		 * to the BAR1 addresses. */
		skd_drive_disappeared(skdev);
		/*start the queue so we can respond with error to requests */
		pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		pr_err("(%s) Start: unknown state %x\n",
		       skd_name(skdev), skdev->drive_state);
		break;
	}

	state = SKD_READL(skdev, FIT_CONTROL);
	pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
	pr_debug("%s:%s:%d Intr Status=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	state = SKD_READL(skdev, FIT_INT_MASK_HOST);
	pr_debug("%s:%s:%d Intr Mask=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
	pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	state = SKD_READL(skdev, FIT_HW_VERSION);
	pr_debug("%s:%s:%d HW version=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	spin_unlock_irqrestore(&skdev->lock, flags);
}
static void skd_stop_device(struct skd_device *skdev)
{
	unsigned long flags;
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	u32 dev_state;
	int i;

	spin_lock_irqsave(&skdev->lock, flags);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		pr_err("(%s): skd_stop_device not online no sync\n",
		       skd_name(skdev));
		goto stop_out;
	}

	if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
		pr_err("(%s): skd_stop_device no special\n",
		       skd_name(skdev));
		goto stop_out;
	}

	skdev->state = SKD_DRVR_STATE_SYNCING;
	skdev->sync_done = 0;

	skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);

	spin_unlock_irqrestore(&skdev->lock, flags);

	wait_event_interruptible_timeout(skdev->waitq,
					 (skdev->sync_done), (10 * HZ));

	spin_lock_irqsave(&skdev->lock, flags);

	switch (skdev->sync_done) {
	case 0:
		pr_err("(%s): skd_stop_device no sync\n",
		       skd_name(skdev));
		break;
	case 1:
		pr_err("(%s): skd_stop_device sync done\n",
		       skd_name(skdev));
		break;
	default:
		pr_err("(%s): skd_stop_device sync error\n",
		       skd_name(skdev));
	}

stop_out:
	skdev->state = SKD_DRVR_STATE_STOPPING;
	spin_unlock_irqrestore(&skdev->lock, flags);

	skd_kill_timer(skdev);

	spin_lock_irqsave(&skdev->lock, flags);
	skd_disable_interrupts(skdev);

	/* ensure all ints on device are cleared */
	/* soft reset the device to unload with a clean slate */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
	SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);

	spin_unlock_irqrestore(&skdev->lock, flags);

	/* poll every 100ms, 1 second timeout */
	for (i = 0; i < 10; i++) {
		dev_state =
			SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
		if (dev_state == FIT_SR_DRIVE_INIT)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(100));
	}

	if (dev_state != FIT_SR_DRIVE_INIT)
		pr_err("(%s): skd_stop_device state error 0x%02x\n",
		       skd_name(skdev), dev_state);
}
/* assume spinlock is held */
static void skd_restart_device(struct skd_device *skdev)
{
	u32 state;

	/* ack all ghost interrupts */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

	state = SKD_READL(skdev, FIT_STATUS);

	pr_debug("%s:%s:%d drive status=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	state &= FIT_SR_DRIVE_STATE_MASK;
	skdev->drive_state = state;
	skdev->last_mtd = 0;

	skdev->state = SKD_DRVR_STATE_RESTARTING;
	skdev->timer_countdown = SKD_RESTARTING_TIMO;

	skd_soft_reset(skdev);
}
/* assume spinlock is held */
static int skd_quiesce_dev(struct skd_device *skdev)
{
	int rc = 0;

	switch (skdev->state) {
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		pr_debug("%s:%s:%d stopping %s queue\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		blk_stop_queue(skdev->queue);
		break;
	case SKD_DRVR_STATE_ONLINE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_RESUMING:
	default:
		rc = -EINVAL;
		pr_debug("%s:%s:%d state [%d] not implemented\n",
			 skdev->name, __func__, __LINE__, skdev->state);
	}
	return rc;
}
/* assume spinlock is held */
static int skd_unquiesce_dev(struct skd_device *skdev)
{
	int prev_driver_state = skdev->state;

	skd_log_skdev(skdev, "unquiesce");
	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
		pr_debug("%s:%s:%d **** device already ONLINE\n",
			 skdev->name, __func__, __LINE__);
		return 0;
	}
	if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
		/*
		 * If there has been a state change to other than
		 * ONLINE, we will rely on controller state change
		 * to come back online and restart the queue.
		 * The BUSY state means that driver is ready to
		 * continue normal processing but waiting for controller
		 * to become available.
		 */
		skdev->state = SKD_DRVR_STATE_BUSY;
		pr_debug("%s:%s:%d drive BUSY state\n",
			 skdev->name, __func__, __LINE__);
		return 0;
	}

	/*
	 * Drive has just come online, driver is either in startup,
	 * paused performing a task, or busy waiting for hardware.
	 */
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		skdev->state = SKD_DRVR_STATE_ONLINE;
		pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
		       skd_name(skdev),
		       skd_skdev_state_to_str(prev_driver_state),
		       prev_driver_state, skd_skdev_state_to_str(skdev->state),
		       skdev->state);
		pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
			 skdev->name, __func__, __LINE__);
		pr_debug("%s:%s:%d starting %s queue\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = 1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
			 skdev->name, __func__, __LINE__,
			 skdev->state);
		return -EBUSY;
	}
	return 0;
}
/*
 *****************************************************************************
 * PCIe MSI/MSI-X INTERRUPT HANDLERS
 *****************************************************************************
 */

static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d MSIX = 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
	pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
	       irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d MSIX = 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
	skd_isr_fwstate(skdev);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;
	int flush_enqueued = 0;
	int deferred;

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d MSIX = 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
	deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
						&flush_enqueued);
	if (flush_enqueued)
		skd_request_fn(skdev->queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		skd_request_fn(skdev->queue);

	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d MSIX = 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
	skd_isr_msg_from_dev(skdev);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d MSIX = 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
/*
 *****************************************************************************
 * PCIe MSI/MSI-X SETUP
 *****************************************************************************
 */

struct skd_msix_entry {
	char isr_name[30];
};

struct skd_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

#define SKD_MAX_MSIX_COUNT		13
#define SKD_MIN_MSIX_COUNT		7
#define SKD_BASE_MSIX_IRQ		4

static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
	{ "(DMA 0)",	    skd_reserved_isr },
	{ "(DMA 1)",	    skd_reserved_isr },
	{ "(DMA 2)",	    skd_reserved_isr },
	{ "(DMA 3)",	    skd_reserved_isr },
	{ "(State Change)", skd_statec_isr   },
	{ "(COMPL_Q)",	    skd_comp_q	     },
	{ "(MSG)",	    skd_msg_isr	     },
	{ "(Reserved)",	    skd_reserved_isr },
	{ "(Reserved)",	    skd_reserved_isr },
	{ "(Queue Full 0)", skd_qfull_isr    },
	{ "(Queue Full 1)", skd_qfull_isr    },
	{ "(Queue Full 2)", skd_qfull_isr    },
	{ "(Queue Full 3)", skd_qfull_isr    },
};
static int skd_acquire_msix(struct skd_device *skdev)
{
	int i, rc;
	struct pci_dev *pdev = skdev->pdev;

	rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
			PCI_IRQ_MSIX);
	if (rc < 0) {
		pr_err("(%s): failed to enable MSI-X %d\n",
		       skd_name(skdev), rc);
		goto out;
	}

	skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
			sizeof(struct skd_msix_entry), GFP_KERNEL);
	if (!skdev->msix_entries) {
		rc = -ENOMEM;
		pr_err("(%s): msix table allocation error\n",
		       skd_name(skdev));
		goto out;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
		struct skd_msix_entry *qentry = &skdev->msix_entries[i];

		snprintf(qentry->isr_name, sizeof(qentry->isr_name),
			 "%s%d-msix %s", DRV_NAME, skdev->devno,
			 msix_entries[i].name);

		rc = devm_request_irq(&skdev->pdev->dev,
				pci_irq_vector(skdev->pdev, i),
				msix_entries[i].handler, 0,
				qentry->isr_name, skdev);
		if (rc) {
			pr_err("(%s): Unable to register(%d) MSI-X "
			       "handler %d: %s\n",
			       skd_name(skdev), rc, i, qentry->isr_name);
			goto msix_out;
		}
	}

	pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
		 skdev->name, __func__, __LINE__,
		 pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
	return 0;

msix_out:
	while (--i >= 0)
		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
out:
	kfree(skdev->msix_entries);
	skdev->msix_entries = NULL;
	return rc;
}
static int skd_acquire_irq(struct skd_device *skdev)
{
	struct pci_dev *pdev = skdev->pdev;
	unsigned int irq_flag = PCI_IRQ_LEGACY;
	int rc;

	if (skd_isr_type == SKD_IRQ_MSIX) {
		rc = skd_acquire_msix(skdev);
		if (!rc)
			return 0;

		pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
		       skd_name(skdev), rc);
	}

	snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
			skdev->devno);

	if (skd_isr_type != SKD_IRQ_LEGACY)
		irq_flag |= PCI_IRQ_MSI;
	rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
	if (rc < 0) {
		pr_err("(%s): failed to allocate the MSI interrupt %d\n",
		       skd_name(skdev), rc);
		return rc;
	}

	rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
			pdev->msi_enabled ? 0 : IRQF_SHARED,
			skdev->isr_name, skdev);
	if (rc) {
		pci_free_irq_vectors(pdev);
		pr_err("(%s): failed to allocate interrupt %d\n",
		       skd_name(skdev), rc);
		return rc;
	}

	return 0;
}
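/*
 * Interrupt setup falls back in order of preference: all
 * SKD_MAX_MSIX_COUNT vectors via skd_acquire_msix(), then a single MSI
 * vector, then a legacy INTx line. Only the legacy case needs
 * IRQF_SHARED, since an INTx line may be shared with other devices.
 */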
static void skd_release_irq(struct skd_device *skdev)
{
	struct pci_dev *pdev = skdev->pdev;

	if (skdev->msix_entries) {
		int i;

		for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
			devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
					skdev);
		}

		kfree(skdev->msix_entries);
		skdev->msix_entries = NULL;
	} else {
		devm_free_irq(&pdev->dev, pdev->irq, skdev);
	}

	pci_free_irq_vectors(pdev);
}
/*
 *****************************************************************************
 * CONSTRUCT
 *****************************************************************************
 */

static int skd_cons_skcomp(struct skd_device *skdev)
{
	int rc = 0;
	struct fit_completion_entry_v1 *skcomp;
	u32 nbytes;

	nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
	nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;

	pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
		 skdev->name, __func__, __LINE__,
		 nbytes, SKD_N_COMPLETION_ENTRY);

	skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
				       &skdev->cq_dma_address);

	if (skcomp == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	skdev->skcomp_table = skcomp;
	skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
							    sizeof(*skcomp) *
							    SKD_N_COMPLETION_ENTRY);

err_out:
	return rc;
}
static int skd_cons_skmsg(struct skd_device *skdev)
{
	int rc = 0;
	u32 i;

	pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
		 skdev->name, __func__, __LINE__,
		 sizeof(struct skd_fitmsg_context),
		 skdev->num_fitmsg_context,
		 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);

	skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
				     *skdev->num_fitmsg_context, GFP_KERNEL);
	if (skdev->skmsg_table == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		skmsg->id = i + SKD_ID_FIT_MSG;

		skmsg->state = SKD_MSG_STATE_IDLE;
		skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
						      SKD_N_FITMSG_BYTES + 64,
						      &skmsg->mb_dma_address);

		if (skmsg->msg_buf == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}

		skmsg->offset = (u32)((u64)skmsg->msg_buf &
				      (~FIT_QCMD_BASE_ADDRESS_MASK));
		skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
		skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
					FIT_QCMD_BASE_ADDRESS_MASK);
		skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
		skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
		memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);

		skmsg->next = &skmsg[1];
	}

	/* Free list is in order starting with the 0th entry. */
	skdev->skmsg_table[i - 1].next = NULL;
	skdev->skmsg_free_list = skdev->skmsg_table;

err_out:
	return rc;
}
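/*
 * The pointer arithmetic above rounds msg_buf and mb_dma_address up to
 * the next FIT_QCMD_BASE_ADDRESS_MASK boundary; the 64 extra bytes in
 * the allocation provide the slack for that move. Clearing the low
 * address bits is what lets skd_send_fitmsg() OR the queue ID and
 * message-size flags directly into the DMA address. 'offset' records
 * the low-order bits of the original address and is applied again when
 * the buffer is released in skd_free_skmsg().
 */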
static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
						  u32 n_sg,
						  dma_addr_t *ret_dma_addr)
{
	struct fit_sg_descriptor *sg_list;
	u32 nbytes;

	nbytes = sizeof(*sg_list) * n_sg;

	sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);

	if (sg_list != NULL) {
		uint64_t dma_address = *ret_dma_addr;
		u32 i;

		memset(sg_list, 0, nbytes);

		for (i = 0; i < n_sg - 1; i++) {
			uint64_t ndp_off;

			ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);

			sg_list[i].next_desc_ptr = dma_address + ndp_off;
		}
		sg_list[i].next_desc_ptr = 0LL;
	}

	return sg_list;
}
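/*
 * The list built above is a singly linked chain in DMA-coherent
 * memory: descriptor i holds the bus address of descriptor i+1 in
 * next_desc_ptr and the final descriptor is terminated with 0, so the
 * card can walk an SG list of any length starting from one base
 * address.
 */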
static int skd_cons_skreq(struct skd_device *skdev)
{
	int rc = 0;
	u32 i;

	pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
		 skdev->name, __func__, __LINE__,
		 sizeof(struct skd_request_context),
		 skdev->num_req_context,
		 sizeof(struct skd_request_context) * skdev->num_req_context);

	skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
				     * skdev->num_req_context, GFP_KERNEL);
	if (skdev->skreq_table == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
		 skdev->name, __func__, __LINE__,
		 skdev->sgs_per_request, sizeof(struct scatterlist),
		 skdev->sgs_per_request * sizeof(struct scatterlist));

	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq;

		skreq = &skdev->skreq_table[i];

		skreq->id = i + SKD_ID_RW_REQUEST;
		skreq->state = SKD_REQ_STATE_IDLE;

		skreq->sg = kzalloc(sizeof(struct scatterlist) *
				    skdev->sgs_per_request, GFP_KERNEL);
		if (skreq->sg == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}
		sg_init_table(skreq->sg, skdev->sgs_per_request);

		skreq->sksg_list = skd_cons_sg_list(skdev,
						    skdev->sgs_per_request,
						    &skreq->sksg_dma_address);

		if (skreq->sksg_list == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}

		skreq->next = &skreq[1];
	}

	/* Free list is in order starting with the 0th entry. */
	skdev->skreq_table[i - 1].next = NULL;
	skdev->skreq_free_list = skdev->skreq_table;

err_out:
	return rc;
}
static int skd_cons_skspcl(struct skd_device *skdev)
{
	int rc = 0;
	u32 i, nbytes;

	pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
		 skdev->name, __func__, __LINE__,
		 sizeof(struct skd_special_context),
		 skdev->n_special,
		 sizeof(struct skd_special_context) * skdev->n_special);

	skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
				      * skdev->n_special, GFP_KERNEL);
	if (skdev->skspcl_table == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < skdev->n_special; i++) {
		struct skd_special_context *skspcl;

		skspcl = &skdev->skspcl_table[i];

		skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
		skspcl->req.state = SKD_REQ_STATE_IDLE;

		skspcl->req.next = &skspcl[1].req;

		nbytes = SKD_N_SPECIAL_FITMSG_BYTES;

		skspcl->msg_buf =
			pci_zalloc_consistent(skdev->pdev, nbytes,
					      &skspcl->mb_dma_address);
		if (skspcl->msg_buf == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}

		skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
					 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
		if (skspcl->req.sg == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}

		skspcl->req.sksg_list = skd_cons_sg_list(skdev,
							 SKD_N_SG_PER_SPECIAL,
							 &skspcl->req.sksg_dma_address);
		if (skspcl->req.sksg_list == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}
	}

	/* Free list is in order starting with the 0th entry. */
	skdev->skspcl_table[i - 1].req.next = NULL;
	skdev->skspcl_free_list = skdev->skspcl_table;

	return rc;

err_out:
	return rc;
}
static int skd_cons_sksb(struct skd_device *skdev)
{
	int rc = 0;
	struct skd_special_context *skspcl;
	u32 nbytes;

	skspcl = &skdev->internal_skspcl;

	skspcl->req.id = 0 + SKD_ID_INTERNAL;
	skspcl->req.state = SKD_REQ_STATE_IDLE;

	nbytes = SKD_N_INTERNAL_BYTES;

	skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
						 &skspcl->db_dma_address);
	if (skspcl->data_buf == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
	skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
						&skspcl->mb_dma_address);
	if (skspcl->msg_buf == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
						 &skspcl->req.sksg_dma_address);
	if (skspcl->req.sksg_list == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	if (!skd_format_internal_skspcl(skdev)) {
		rc = -EINVAL;
		goto err_out;
	}

err_out:
	return rc;
}
static int skd_cons_disk(struct skd_device *skdev)
{
	int rc = 0;
	struct gendisk *disk;
	struct request_queue *q;
	unsigned long flags;

	disk = alloc_disk(SKD_MINORS_PER_DEVICE);
	if (!disk) {
		rc = -ENOMEM;
		goto err_out;
	}

	skdev->disk = disk;
	sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);

	disk->major = skdev->major;
	disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
	disk->fops = &skd_blockdev_ops;
	disk->private_data = skdev;

	q = blk_init_queue(skd_request_fn, &skdev->lock);
	if (!q) {
		rc = -ENOMEM;
		goto err_out;
	}

	skdev->queue = q;
	disk->queue = q;
	q->queuedata = skdev;

	blk_queue_write_cache(q, true, true);
	blk_queue_max_segments(q, skdev->sgs_per_request);
	blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);

	/* set sysfs optimal_io_size to 8K */
	blk_queue_io_opt(q, 8192);

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d stopping %s queue\n",
		 skdev->name, __func__, __LINE__, skdev->name);
	blk_stop_queue(skdev->queue);
	spin_unlock_irqrestore(&skdev->lock, flags);

err_out:
	return rc;
}
#define SKD_N_DEV_TABLE		16u
static u32 skd_next_devno;

static struct skd_device *skd_construct(struct pci_dev *pdev)
{
	struct skd_device *skdev;
	int blk_major = skd_major;
	int rc;

	skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);

	if (!skdev) {
		pr_err(PFX "(%s): memory alloc failure\n",
		       pci_name(pdev));
		return NULL;
	}

	skdev->state = SKD_DRVR_STATE_LOAD;
	skdev->pdev = pdev;
	skdev->devno = skd_next_devno++;
	skdev->major = blk_major;
	sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
	skdev->dev_max_queue_depth = 0;

	skdev->num_req_context = skd_max_queue_depth;
	skdev->num_fitmsg_context = skd_max_queue_depth;
	skdev->n_special = skd_max_pass_thru;
	skdev->cur_max_queue_depth = 1;
	skdev->queue_low_water_mark = 1;
	skdev->proto_ver = 99;
	skdev->sgs_per_request = skd_sgs_per_request;
	skdev->dbg_level = skd_dbg_level;

	atomic_set(&skdev->device_count, 0);

	spin_lock_init(&skdev->lock);

	INIT_WORK(&skdev->completion_worker, skd_completion_worker);

	pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_skcomp(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_skmsg(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_skreq(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_skspcl(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_sksb(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_disk(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
	return skdev;

err_out:
	pr_debug("%s:%s:%d construct failed\n",
		 skdev->name, __func__, __LINE__);
	skd_destruct(skdev);
	return NULL;
}
/*
 *****************************************************************************
 * DESTRUCT (FREE)
 *****************************************************************************
 */

static void skd_free_skcomp(struct skd_device *skdev)
{
	if (skdev->skcomp_table != NULL) {
		u32 nbytes;

		nbytes = sizeof(skdev->skcomp_table[0]) *
			 SKD_N_COMPLETION_ENTRY;
		pci_free_consistent(skdev->pdev, nbytes,
				    skdev->skcomp_table, skdev->cq_dma_address);
	}

	skdev->skcomp_table = NULL;
	skdev->cq_dma_address = 0;
}
static void skd_free_skmsg(struct skd_device *skdev)
{
	u32 i;

	if (skdev->skmsg_table == NULL)
		return;

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		if (skmsg->msg_buf != NULL) {
			skmsg->msg_buf += skmsg->offset;
			skmsg->mb_dma_address += skmsg->offset;
			pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
					    skmsg->msg_buf,
					    skmsg->mb_dma_address);
		}
		skmsg->msg_buf = NULL;
		skmsg->mb_dma_address = 0;
	}

	kfree(skdev->skmsg_table);
	skdev->skmsg_table = NULL;
}
static void skd_free_sg_list(struct skd_device *skdev,
			     struct fit_sg_descriptor *sg_list,
			     u32 n_sg, dma_addr_t dma_addr)
{
	if (sg_list != NULL) {
		u32 nbytes;

		nbytes = sizeof(*sg_list) * n_sg;

		pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
	}
}
static void skd_free_skreq(struct skd_device *skdev)
{
	u32 i;

	if (skdev->skreq_table == NULL)
		return;

	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq;

		skreq = &skdev->skreq_table[i];

		skd_free_sg_list(skdev, skreq->sksg_list,
				 skdev->sgs_per_request,
				 skreq->sksg_dma_address);

		skreq->sksg_list = NULL;
		skreq->sksg_dma_address = 0;

		kfree(skreq->sg);
	}

	kfree(skdev->skreq_table);
	skdev->skreq_table = NULL;
}
static void skd_free_skspcl(struct skd_device *skdev)
{
	u32 i;
	u32 nbytes;

	if (skdev->skspcl_table == NULL)
		return;

	for (i = 0; i < skdev->n_special; i++) {
		struct skd_special_context *skspcl;

		skspcl = &skdev->skspcl_table[i];

		if (skspcl->msg_buf != NULL) {
			nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
			pci_free_consistent(skdev->pdev, nbytes,
					    skspcl->msg_buf,
					    skspcl->mb_dma_address);
		}

		skspcl->msg_buf = NULL;
		skspcl->mb_dma_address = 0;

		skd_free_sg_list(skdev, skspcl->req.sksg_list,
				 SKD_N_SG_PER_SPECIAL,
				 skspcl->req.sksg_dma_address);

		skspcl->req.sksg_list = NULL;
		skspcl->req.sksg_dma_address = 0;

		kfree(skspcl->req.sg);
	}

	kfree(skdev->skspcl_table);
	skdev->skspcl_table = NULL;
}
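/*
 * The "sksb" below is the single internal special context the driver
 * reserves for its own commands (the internal requests that, among
 * other things, populate read_cap_is_valid and inq_serial_num), as
 * opposed to the skspcl_table entries used for user pass-through.
 */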
static void skd_free_sksb(struct skd_device *skdev)
{
	struct skd_special_context *skspcl;
	u32 nbytes;

	skspcl = &skdev->internal_skspcl;

	if (skspcl->data_buf != NULL) {
		nbytes = SKD_N_INTERNAL_BYTES;

		pci_free_consistent(skdev->pdev, nbytes,
				    skspcl->data_buf, skspcl->db_dma_address);
	}

	skspcl->data_buf = NULL;
	skspcl->db_dma_address = 0;

	if (skspcl->msg_buf != NULL) {
		nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
		pci_free_consistent(skdev->pdev, nbytes,
				    skspcl->msg_buf, skspcl->mb_dma_address);
	}

	skspcl->msg_buf = NULL;
	skspcl->mb_dma_address = 0;

	skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
			 skspcl->req.sksg_dma_address);

	skspcl->req.sksg_list = NULL;
	skspcl->req.sksg_dma_address = 0;
}
static void skd_free_disk(struct skd_device *skdev)
{
	struct gendisk *disk = skdev->disk;

	if (disk != NULL) {
		struct request_queue *q = disk->queue;

		if (disk->flags & GENHD_FL_UP)
			del_gendisk(disk);
		if (q)
			blk_cleanup_queue(q);
		put_disk(disk);
	}
	skdev->disk = NULL;
}
static void skd_destruct(struct skd_device *skdev)
{
	if (skdev == NULL)
		return;

	pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
	skd_free_disk(skdev);

	pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
	skd_free_sksb(skdev);

	pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
	skd_free_skspcl(skdev);

	pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
	skd_free_skreq(skdev);

	pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
	skd_free_skmsg(skdev);

	pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
	skd_free_skcomp(skdev);

	pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
	kfree(skdev);
}
/*
 *****************************************************************************
 * BLOCK DEVICE (BDEV) GLUE
 *****************************************************************************
 */
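/*
 * Report a synthetic CHS geometry for legacy callers such as fdisk:
 * a fixed 64 heads by 255 sectors per track, with the cylinder count
 * derived from the real capacity.
 */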
static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct skd_device *skdev;
	u64 capacity;

	skdev = bdev->bd_disk->private_data;

	pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
		 skdev->name, __func__, __LINE__,
		 bdev->bd_disk->disk_name, current->comm);

	if (skdev->read_cap_is_valid) {
		capacity = get_capacity(skdev->disk);
		geo->heads = 64;
		geo->sectors = 255;
		geo->cylinders = (capacity) / (255 * 64);
		return 0;
	}
	return -EIO;
}
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
	pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
	device_add_disk(parent, skdev->disk);
	return 0;
}
static const struct block_device_operations skd_blockdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= skd_bdev_ioctl,
	.getgeo		= skd_bdev_getgeo,
};
/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */
static const struct pci_device_id skd_pci_tbl[] = {
	{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0 }			/* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
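/*
 * skd_pci_info() formats the negotiated PCIe link parameters into str.
 * The Link Status register lives at offset 0x12 from the PCI Express
 * capability; bits [3:0] encode the link speed (1 = 2.5GT/s,
 * 2 = 5.0GT/s) and bits [9:4] the link width, which is why the code
 * masks with 0xF and 0x3F0 below.
 */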
static char *skd_pci_info(struct skd_device *skdev, char *str)
{
	int pcie_reg;

	strcpy(str, "PCIe (");
	pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;	/* Link Status register */
		pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (0xF);
		lwidth = (pcie_lstat & 0x3F0) >> 4;

		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
		strcat(str, lwstr);
	}
	return str;
}
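/*
 * Probe sequence: enable the PCI device, claim its regions, set up a
 * 64-bit DMA mask (falling back to 32-bit), register the block major,
 * construct the skd_device, map the BARs, hook the interrupt, start
 * the timer and the device, then wait up to SKD_START_WAIT_SECONDS for
 * the firmware to bring the drive online before attaching the gendisk.
 */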
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i;
	int rc = 0;
	char pci_str[32];
	struct skd_device *skdev;

	pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
		DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
	pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
		pci_name(pdev), pdev->vendor, pdev->device);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
		if (rc < 0)
			goto err_out_regions;
		BUG_ON(!rc);
		skd_major = rc;
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skd_name(skdev), rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	pci_set_drvdata(pdev, skdev);

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       skd_name(skdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(&pdev->dev, skdev);
		rc = 0;
	} else {
		/* we timed out, something is wrong with the device,
		   don't add the disk structure */
		pr_err("(%s): error: waiting for s1120 timed out %d!\n",
		       skd_name(skdev), rc);
		/* if no other error was seen, report the timeout as ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

#ifdef SKD_VMK_POLL_HANDLER
	if (skdev->irq_type == SKD_IRQ_MSIX) {
		/* MSIX completion handler is being used for coredump */
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->msix_entries[5].vector,
						  skd_comp_q, skdev);
	} else
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->pdev->irq, skd_isr,
						  skdev);
#endif /* SKD_VMK_POLL_HANDLER */

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
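/*
 * Legacy PCI power management: suspend quiesces the device and releases
 * every resource that resume will have to reacquire (IRQ, BAR mappings,
 * regions) before dropping to the requested power state.
 */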
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	skd_stop_device(skdev);

	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
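/*
 * Resume mirrors skd_pci_probe() from the DMA-mask setup onward, minus
 * the gendisk attach: the disk registered at probe time persists across
 * the suspend/resume cycle, so only the hardware path is rebuilt.
 */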
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -1;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skdev->name, rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       pci_name(pdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	pr_err("skd_pci_shutdown called\n");

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}

	pr_err("%s: calling stop\n", skd_name(skdev));
	skd_stop_device(skdev);
}
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};
/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
static const char *skd_name(struct skd_device *skdev)
{
	memset(skdev->id_str, 0, sizeof(skdev->id_str));

	if (skdev->inquiry_is_valid)
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
			 skdev->name, skdev->inq_serial_num,
			 pci_name(skdev->pdev));
	else
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
			 skdev->name, pci_name(skdev->pdev));

	return skdev->id_str;
}
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}
const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}
static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
	pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
		 skdev->name, __func__, __LINE__,
		 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		 skd_skdev_state_to_str(skdev->state), skdev->state);
	pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->in_flight, skdev->cur_max_queue_depth,
		 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
		 skdev->name, __func__, __LINE__,
		 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
		 skmsg->id, skmsg->length);
}
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		 skdev->name, __func__, __LINE__,
		 skd_skreq_state_to_str(skreq->state), skreq->state,
		 skreq->id, skreq->fitmsg_id);
	pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
		 skdev->name, __func__, __LINE__,
		 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_debug("%s:%s:%d "
			 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count,
			 (int)rq_data_dir(req));
	} else
		pr_debug("%s:%s:%d req=NULL\n",
			 skdev->name, __func__, __LINE__);
}
/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
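/*
 * skd_init() sanity-checks every module parameter before registering
 * the PCI driver, clamping out-of-range values back to their defaults
 * instead of failing the load. A hypothetical load line exercising the
 * checked knobs (assuming they are exported with module_param()
 * earlier in this file):
 *
 *   modprobe skd skd_max_queue_depth=64 skd_sgs_per_request=256 \
 *           skd_max_pass_thru=32
 */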
static int __init skd_init(void)
{
	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	return pci_register_driver(&skd_driver);
}
static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);