/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST, which
 * had itself been acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/stringify.h>
#include <scsi/scsi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"
static int skd_dbg_level;
static int skd_isr_comp_limit = 4;
enum {
	SKD_FLUSH_INITIALIZER,
	SKD_FLUSH_ZERO_SIZE_FIRST,
	SKD_FLUSH_DATA_SECOND,
};
#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)
#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "

MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
#define PCI_VENDOR_ID_STEC	0x1B39
#define PCI_DEVICE_ID_S1120	0x0001

#define SKD_FUA_NV		(1 << 1)
#define SKD_MINORS_PER_DEVICE	16

#define SKD_MAX_QUEUE_DEPTH	200u

#define SKD_PAUSE_TIMEOUT	(5 * 1000)

#define SKD_N_FITMSG_BYTES	(512u)
#define SKD_MAX_REQ_PER_MSG	14

#define SKD_N_SPECIAL_FITMSG_BYTES	(128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096 * 4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u

#define SKD_N_COMPLETION_ENTRY	256u
#define SKD_N_READ_CAP_BYTES	(8u)

#define SKD_N_INTERNAL_BYTES	(512u)
#define SKD_SKCOMP_SIZE \
	((sizeof(struct fit_completion_entry_v1) + \
	  sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
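/*
 * The completion entries and their matching error records are carved from a
 * single coherent DMA allocation of SKD_SKCOMP_SIZE bytes; skd_reset_skcomp()
 * below clears both tables with one memset over that buffer.
 */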
/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR		(0x400)
#define SKD_ID_TABLE_MASK	(3u << 8u)
#define  SKD_ID_RW_REQUEST	(0u << 8u)
#define  SKD_ID_INTERNAL	(1u << 8u)
#define  SKD_ID_FIT_MSG		(3u << 8u)
#define SKD_ID_SLOT_MASK	0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK	0x03FFu
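/*
 * A request id is thus laid out as [uniquifier | table | slot]: the low
 * eight bits select a slot in the per-table context array, bits 8-9 select
 * the table (r/w, internal, or FIT message), and the remaining high bits
 * are bumped by SKD_ID_INCR each time a context is reused, so a stale
 * completion carrying an old id can be detected and rejected.
 */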
#define SKD_N_TIMEOUT_SLOT	4u
#define SKD_TIMEOUT_SLOT_MASK	3u
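/*
 * Timeout accounting: every started request is binned into one of the four
 * slots by (timeout_stamp & SKD_TIMEOUT_SLOT_MASK), and the once-a-second
 * timer advances timeout_stamp. When the timer comes back around to a slot
 * that is still non-empty, the requests counted there have been outstanding
 * for a full rotation and are treated as overdue.
 */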
#define SKD_N_MAX_SECTORS	2048u

#define SKD_MAX_RETRIES		2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36
enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_DRAINING_TIMEOUT,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};
#define SKD_WAIT_BOOT_TIMO	SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO	SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO	SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO	SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO		SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO	SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS	90u
enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
};
enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};
struct skd_msg_buf {
	struct fit_msg_hdr	fmh;
	struct skd_scsi_request	scsi[SKD_MAX_REQ_PER_MSG];
};
struct skd_fitmsg_context {
	u32 id;

	u32 length;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};
struct skd_request_context {
	enum skd_req_state state;

	struct request *req;
	u32 id;
	u32 fitmsg_id;

	u8 flush_cmd;

	u32 timeout_stamp;
	enum dma_data_direction data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;
};
struct skd_special_context {
	struct skd_request_context req;

	void *data_buf;
	dma_addr_t db_dma_address;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};
typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2
struct skd_device {
	void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct request_queue *queue;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	u32 devno;
	u32 dbg_level;

	enum skd_drvr_state state;
	u32 drive_state;

	atomic_t in_flight;
	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	atomic_t timeout_slot[SKD_N_TIMEOUT_SLOT];
	atomic_t timeout_stamp;
	struct skd_fitmsg_context *skmsg_table;

	struct skd_request_context *skreq_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13];	/* 12 chars plus null term */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timo_slot;

	int sgs_per_request;

	u32 proto_ver;
	u32 last_mtd;
	u32 drive_jiffies;

	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16

	struct work_struct completion_worker;
};
#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF)       skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
	return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	writel(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	writeq(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
			val);
}
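/*
 * All device register access goes through BAR1 via the accessors above;
 * at dbg_level >= 2 every read and write is traced to the kernel log.
 */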
#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time. IGNORED");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev, struct request *req,
			    blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);
/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
{
	struct request_queue *q = skdev->queue;
	struct request *req;

	for (;; ) {
		req = blk_peek_request(q);
		if (req == NULL)
			break;
		WARN_ON_ONCE(blk_queue_start_tag(q, req));
		__blk_end_request_all(req, BLK_STS_IOERR);
	}
}
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = READ_10;
	else
		scsi_req->cdb[0] = WRITE_10;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}
static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}
/*
 * Return true if and only if all pending requests should be failed.
 */
static bool skd_fail_all(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
	/* In case of starting, we haven't started the queue,
	 * so we can't get here... but requests are
	 * possibly hanging out waiting for us because we
	 * reported the dev/skd0 already. They'll wait
	 * forever if connect doesn't complete.
	 * What to do??? delay dev/skd0 ??
	 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return false;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		return true;
	}
}
static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	unsigned long io_flags;
	u32 lba;
	u32 count;
	u32 timo_slot;
	u32 tag;
	int data_dir;
	__be64 be_dmaa;
	u64 cmdctxt;
	int flush, fua;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		if (skd_fail_all(q))
			skd_fail_all_pending(skdev);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (atomic_read(&skdev->in_flight) >=
		    skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;; ) {
		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (req_op(req) == REQ_OP_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		dev_dbg(&skdev->pdev->dev,
			"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (atomic_read(&skdev->in_flight) >=
		    skdev->cur_max_queue_depth) {
			dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
				atomic_read(&skdev->in_flight),
				skdev->cur_max_queue_depth);
			break;
		}

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		WARN_ON_ONCE(blk_queue_start_tag(q, req));

		tag = blk_mq_unique_tag(req);
		WARN_ONCE(tag >= skd_max_queue_depth,
			  "%#x > %#x (nr_requests = %lu)\n", tag,
			  skd_max_queue_depth, q->nr_requests);

		skreq = &skdev->skreq_table[tag];
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		skreq->id = tag + SKD_ID_RW_REQUEST;
		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;

		skreq->req = req;
		skreq->fitmsg_id = 0;

		skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE :
			DMA_TO_DEVICE;

		if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
			dev_dbg(&skdev->pdev->dev, "error Out\n");
			skd_end_request(skdev, skreq->req, BLK_STS_RESOURCE);
			continue;
		}

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			skmsg = &skdev->skmsg_table[tag];

			/* Initialize the FIT msg header */
			fmh = &skmsg->msg_buf->fmh;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		scsi_req =
			&skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
		memset(scsi_req, 0, sizeof(*scsi_req));

		be_dmaa = cpu_to_be64(skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);
		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = atomic_read(&skdev->timeout_stamp);
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		atomic_inc(&skdev->timeout_slot[timo_slot]);
		atomic_inc(&skdev->in_flight);
		dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
			atomic_read(&skdev->in_flight));

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/* If the FIT msg buffer is not empty send what we got. */
	if (skmsg) {
		WARN_ON_ONCE(!fmh->num_protocol_cmds_coalesced);
		skd_send_fitmsg(skdev, skmsg);
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}
static void skd_end_request(struct skd_device *skdev, struct request *req,
			    blk_status_t error)
{
	if (unlikely(error)) {
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		dev_err(&skdev->pdev->dev,
			"Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
			count, req->tag);
	} else
		dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", req->tag,
			error);

	__blk_end_request_all(req, error);
}
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = skreq->req;
	struct scatterlist *sgl = &skreq->sg[0], *sg;
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
		     skreq->data_dir != DMA_FROM_DEVICE);

	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
	if (n_sg <= 0)
		return false;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for_each_sg(sgl, sg, n_sg, i) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(sg);
		uint64_t dma_addr = sg_dma_address(sg);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%llx\n",
			skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return true;
}
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}
/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);
static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;
	u32 timo_slot;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}

	timo_slot = atomic_inc_return(&skdev->timeout_stamp) &
		SKD_TIMEOUT_SLOT_MASK;

	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * a full slot rotation (SKD_N_TIMEOUT_SLOT seconds) ago.
	 */
	if (atomic_read(&skdev->timeout_slot[timo_slot]) == 0)
		goto timer_func_out;

	/* Something is overdue */
	dev_dbg(&skdev->pdev->dev, "found %d timeouts, draining busy=%d\n",
		atomic_read(&skdev->timeout_slot[timo_slot]),
		atomic_read(&skdev->in_flight));
	dev_err(&skdev->pdev->dev, "Overdue IOs (%d), busy %d\n",
		atomic_read(&skdev->timeout_slot[timo_slot]),
		atomic_read(&skdev->in_flight));

	skdev->timer_countdown = SKD_DRAINING_TIMO;
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;
	blk_stop_queue(skdev->queue);

timer_func_out:
	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}
static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		dev_dbg(&skdev->pdev->dev,
			"drive busy sanitize[%x], driver[%x]\n",
			skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
			skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		dev_dbg(&skdev->pdev->dev,
			"busy[%x], timedout=%d, restarting device.",
			skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;

		dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
			skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		dev_dbg(&skdev->pdev->dev,
			"draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
			skdev->timo_slot, skdev->timer_countdown,
			atomic_read(&skdev->in_flight),
			atomic_read(&skdev->timeout_slot[skdev->timo_slot]));
		/* if the slot has cleared we can let the I/O continue */
		if (atomic_read(&skdev->timeout_slot[skdev->timo_slot]) == 0) {
			dev_dbg(&skdev->pdev->dev,
				"Slot drained, starting queue.\n");
			skdev->state = SKD_DRVR_STATE_ONLINE;
			blk_start_queue(skdev->queue);
			return;
		}
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;
		dev_err(&skdev->pdev->dev,
			"DriveFault Reconnect Timeout (%x)\n",
			skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes.
			 */
			skd_recover_requests(skdev);
		else {
			dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
				skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev);
		}

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}
static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}
/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */
static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = &skspcl->msg_buf->fmh;
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = &skspcl->msg_buf->scsi[0];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	skspcl->req.n_sg = 1;
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}

#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
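/*
 * Internal requests drive the connect-time handshake as a chain: a
 * TEST_UNIT_READY is followed by WRITE_BUFFER and READ_BUFFER of a
 * WR_BUF_SIZE incrementing byte pattern, the read-back is verified, and
 * READ_CAPACITY and INQUIRY (serial number page) then complete the
 * bring-up. Each step is issued from the completion handler of the
 * previous one (see skd_complete_internal() below).
 */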
static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	scsi = &skspcl->msg_buf->scsi[0];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;
	}

	skd_send_special_fitmsg(skdev, skspcl);
}
static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}

static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}
static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) &&
	    (code == 0x04) && (qual == 0x06)) {
		dev_err(&skdev->pdev->dev,
			"*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
			key, code, qual, fruc);
	}
}
static void skd_complete_internal(struct skd_device *skdev,
				  struct fit_completion_entry_v1 *skcomp,
				  struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];

	lockdep_assert_held(&skdev->lock);

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"TUR failed, don't send any more, state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** TUR failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"write buffer failed, don't send any more, state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** write buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				dev_err(&skdev->pdev->dev,
					"*** W/R Buffer mismatch %d ***\n",
					skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					dev_err(&skdev->pdev->dev,
						"W/R Buffer Connect Error\n");
					return;
				}
			}
		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"read buffer failed, don't send any more, state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** read buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];

			dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
				skdev->read_cap_last_lba,
				skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}
/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
{
	u64 qcmd;

	dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
		skmsg->mb_dma_address, atomic_read(&skdev->in_flight));
	dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;
		int i;

		for (i = 0; i < skmsg->length; i += 8) {
			dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
				&bp[i]);
		}
	}

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)
{
	u64 qcmd;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
				&bp[i]);
		}

		dev_dbg(&skdev->pdev->dev,
			"skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
			skspcl, skspcl->req.id, skspcl->req.sksg_list,
			skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */
static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr);

struct sns_info {
	u8 type;
	u8 stat;
	u8 key;
	u8 asc;
	u8 ascq;
	u8 mask;
	enum skd_check_status_action action;
};

static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{ 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	/* Smart alerts */
	{ 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B, 0, 0, 0x1C,		/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F,	/* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },
};
/*
 * Look up status and sense data to decide how to handle the error
 * as returned by the s1120.
 *
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, struct fit_comp_error_info *skerr)
{
	int i;

	dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
		skerr->key, skerr->code, skerr->qual, skerr->fruc);

	dev_dbg(&skdev->pdev->dev,
		"stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
		skerr->fruc);

	/* Does the info match an entry in the good category? */
	for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			dev_err(&skdev->pdev->dev,
				"SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
				skerr->key, skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		dev_dbg(&skdev->pdev->dev, "status check: error\n");
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	dev_dbg(&skdev->pdev->dev, "status check good default\n");
	return SKD_CHECK_STATUS_REPORT_GOOD;
}
static void skd_resolve_req_exception(struct skd_device *skdev,
				      struct skd_request_context *skreq,
				      struct request *req)
{
	u8 cmp_status = skreq->completion.status;

	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
	case SKD_CHECK_STATUS_REPORT_GOOD:
	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
		skd_end_request(skdev, req, BLK_STS_OK);
		break;

	case SKD_CHECK_STATUS_BUSY_IMMINENT:
		skd_log_skreq(skdev, skreq, "retry(busy)");
		blk_requeue_request(skdev->queue, req);
		dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
		skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
		skdev->timer_countdown = SKD_TIMER_MINUTES(20);
		skd_quiesce_dev(skdev);
		break;

	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
		if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
			skd_log_skreq(skdev, skreq, "retry");
			blk_requeue_request(skdev->queue, req);
			break;
		}
		/* fall through */

	case SKD_CHECK_STATUS_REPORT_ERROR:
	default:
		skd_end_request(skdev, req, BLK_STS_IOERR);
		break;
	}
}
/* assume spinlock is already held */
static void skd_release_skreq(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	u32 timo_slot;

	/*
	 * Decrease the number of active requests.
	 * Also decrements the count in the timeout slot.
	 */
	SKD_ASSERT(atomic_read(&skdev->in_flight) > 0);
	atomic_dec(&skdev->in_flight);

	timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
	SKD_ASSERT(atomic_read(&skdev->timeout_slot[timo_slot]) > 0);
	atomic_dec(&skdev->timeout_slot[timo_slot]);

	/*
	 * Reclaim the skd_request_context
	 */
	skreq->state = SKD_REQ_STATE_IDLE;
	skreq->id += SKD_ID_INCR;
}
static struct skd_request_context *skd_skreq_from_rq(struct skd_device *skdev,
						     struct request *rq)
{
	struct skd_request_context *skreq;
	int i;

	for (i = 0, skreq = skdev->skreq_table; i < skdev->num_fitmsg_context;
	     i++, skreq++)
		if (skreq->req == rq)
			return skreq;

	return NULL;
}
static int skd_isr_completion_posted(struct skd_device *skdev,
				     int limit, int *enqueued)
{
	struct fit_completion_entry_v1 *skcmp;
	struct fit_comp_error_info *skerr;
	u16 req_id;
	u32 tag;
	struct request *rq;
	struct skd_request_context *skreq;
	u16 cmp_cntxt;
	u8 cmp_status;
	u8 cmp_cycle;
	u32 cmp_bytes;
	int rc = 0;
	int processed = 0;

	lockdep_assert_held(&skdev->lock);

	for (;; ) {
		SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);

		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
		cmp_cycle = skcmp->cycle;
		cmp_cntxt = skcmp->tag;
		cmp_status = skcmp->status;
		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);

		skerr = &skdev->skerr_table[skdev->skcomp_ix];

		dev_dbg(&skdev->pdev->dev,
			"cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
			skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
			cmp_cntxt, cmp_status, atomic_read(&skdev->in_flight),
			cmp_bytes, skdev->proto_ver);

		if (cmp_cycle != skdev->skcomp_cycle) {
			dev_dbg(&skdev->pdev->dev, "end of completions\n");
			break;
		}

		/*
		 * Update the completion queue head index and possibly
		 * the completion cycle count. 8-bit wrap-around.
		 */
		skdev->skcomp_ix++;
		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
			skdev->skcomp_ix = 0;
			skdev->skcomp_cycle++;
		}

		/*
		 * The command context is a unique 32-bit ID. The low order
		 * bits help locate the request. The request is usually a
		 * r/w request (see skd_start() above) or a special request.
		 */
		req_id = cmp_cntxt;
		tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;

		/* Is this other than a r/w request? */
		if (tag >= skdev->num_req_context) {
			/*
			 * This is not a completion for a r/w request.
			 */
			WARN_ON_ONCE(blk_map_queue_find_tag(skdev->queue->
							    queue_tags, tag));
			skd_complete_other(skdev, skcmp, skerr);
			continue;
		}

		rq = blk_map_queue_find_tag(skdev->queue->queue_tags, tag);
		if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
			 tag))
			continue;
		skreq = skd_skreq_from_rq(skdev, rq);

		/*
		 * Make sure the request ID for the slot matches.
		 */
		if (skreq->id != req_id) {
			u16 new_id = cmp_cntxt;

			dev_dbg(&skdev->pdev->dev,
				"mismatch comp_id=0x%x req_id=0x%x\n", req_id,
				skreq->id);
			dev_err(&skdev->pdev->dev,
				"Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
				req_id, skreq->id, new_id);

			continue;
		}

		SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);

		skreq->completion = *skcmp;
		if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
			skreq->err_info = *skerr;
			skd_log_check_status(skdev, cmp_status, skerr->key,
					     skerr->code, skerr->qual,
					     skerr->fruc);
		}
		/* Release DMA resources for the request. */
		if (skreq->n_sg > 0)
			skd_postop_sg_list(skdev, skreq);

		/* Mark the FIT msg and timeout slot as free. */
		skd_release_skreq(skdev, skreq);

		/*
		 * Capture the outcome and post it back to the native request.
		 */
		if (likely(cmp_status == SAM_STAT_GOOD))
			skd_end_request(skdev, rq, BLK_STS_OK);
		else
			skd_resolve_req_exception(skdev, skreq, rq);

		/* skd_isr_comp_limit equal zero means no limit */
		if (limit) {
			if (++processed >= limit) {
				rc = 1;
				break;
			}
		}
	}

	if (skdev->state == SKD_DRVR_STATE_PAUSING &&
	    atomic_read(&skdev->in_flight) == 0) {
		skdev->state = SKD_DRVR_STATE_PAUSED;
		wake_up_interruptible(&skdev->waitq);
	}

	return rc;
}
static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr)
{
	u32 req_id;
	u32 req_table;
	u32 req_slot;
	struct skd_special_context *skspcl;

	lockdep_assert_held(&skdev->lock);

	req_id = skcomp->tag;
	req_table = req_id & SKD_ID_TABLE_MASK;
	req_slot = req_id & SKD_ID_SLOT_MASK;

	dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
		req_id, req_slot);

	/*
	 * Based on the request id, determine how to dispatch this completion.
	 * This switch/case is finding the good cases and forwarding the
	 * completion entry. Errors are reported below the switch.
	 */
	switch (req_table) {
	case SKD_ID_RW_REQUEST:
		/*
		 * The caller, skd_isr_completion_posted() above,
		 * handles r/w requests. The only way we get here
		 * is if the req_slot is out of bounds.
		 */
		break;

	case SKD_ID_INTERNAL:
		if (req_slot == 0) {
			skspcl = &skdev->internal_skspcl;
			if (skspcl->req.id == req_id &&
			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
				skd_complete_internal(skdev,
						      skcomp, skerr, skspcl);
				return;
			}
		}
		break;

	case SKD_ID_FIT_MSG:
		/*
		 * These id's should never appear in a completion record.
		 */
		break;

	default:
		/*
		 * These id's should never appear anywhere;
		 */
		break;
	}

	/*
	 * If we get here it is a bad or stale id.
	 */
}
*skdev
)
1788 memset(skdev
->skcomp_table
, 0, SKD_SKCOMP_SIZE
);
1790 skdev
->skcomp_ix
= 0;
1791 skdev
->skcomp_cycle
= 1;
/*
 *****************************************************************************
 * INTERRUPTS
 *****************************************************************************
 */
static void skd_completion_worker(struct work_struct *work)
{
	struct skd_device *skdev =
		container_of(work, struct skd_device, completion_worker);
	unsigned long flags;
	int flush_enqueued = 0;

	spin_lock_irqsave(&skdev->lock, flags);

	/*
	 * pass in limit=0, which means no limit..
	 * process everything in compq
	 */
	skd_isr_completion_posted(skdev, 0, &flush_enqueued);
	blk_run_queue_async(skdev->queue);

	spin_unlock_irqrestore(&skdev->lock, flags);
}
static void skd_isr_msg_from_dev(struct skd_device *skdev);

static irqreturn_t
skd_isr(int irq, void *ptr)
{
	struct skd_device *skdev = ptr;
	u32 intstat;
	u32 ack;
	irqreturn_t rc = IRQ_NONE;
	int deferred = 0;
	int flush_enqueued = 0;

	spin_lock(&skdev->lock);

	for (;; ) {
		intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);

		ack = FIT_INT_DEF_MASK;
		ack &= intstat;

		dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
			ack);

		/* As long as there is an int pending on device, keep
		 * running loop. When none, get out, but if we've never
		 * done any processing, call completion handler?
		 */
		if (ack == 0) {
			/* No interrupts on device, but run the completion
			 * processor anyway?
			 */
			if (rc == IRQ_NONE)
				if (likely (skdev->state
					== SKD_DRVR_STATE_ONLINE))
					deferred = 1;
			break;
		}

		rc = IRQ_HANDLED;

		SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);

		if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
			   (skdev->state != SKD_DRVR_STATE_STOPPING))) {
			if (intstat & FIT_ISH_COMPLETION_POSTED) {
				/*
				 * If we have already deferred completion
				 * processing, don't bother running it again
				 */
				if (deferred == 0)
					deferred =
						skd_isr_completion_posted(skdev,
						skd_isr_comp_limit,
						&flush_enqueued);
			}

			if (intstat & FIT_ISH_FW_STATE_CHANGE) {
				skd_isr_fwstate(skdev);
				if (skdev->state == SKD_DRVR_STATE_FAULT ||
				    skdev->state ==
				    SKD_DRVR_STATE_DISAPPEARED) {
					spin_unlock(&skdev->lock);
					return rc;
				}
			}

			if (intstat & FIT_ISH_MSG_FROM_DEV)
				skd_isr_msg_from_dev(skdev);
		}
	}

	if (unlikely(flush_enqueued))
		blk_run_queue_async(skdev->queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		blk_run_queue_async(skdev->queue);

	spin_unlock(&skdev->lock);

	return rc;
}
static void skd_drive_fault(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_FAULT;
	dev_err(&skdev->pdev->dev, "Drive FAULT\n");
}

static void skd_drive_disappeared(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_DISAPPEARED;
	dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
}
static void skd_isr_fwstate(struct skd_device *skdev)
{
	u32 sense;
	u32 state;
	u32 mtd;
	int prev_driver_state = skdev->state;

	sense = SKD_READL(skdev, FIT_STATUS);
	state = sense & FIT_SR_DRIVE_STATE_MASK;

	dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_drive_state_to_str(state), state);

	skdev->drive_state = state;

	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_INIT:
		if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
			skd_disable_interrupts(skdev);
			break;
		}
		if (skdev->state == SKD_DRVR_STATE_RESTARTING)
			skd_recover_requests(skdev);
		if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
			skdev->timer_countdown = SKD_STARTING_TIMO;
			skdev->state = SKD_DRVR_STATE_STARTING;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_SR_DRIVE_ONLINE:
		skdev->cur_max_queue_depth = skd_max_queue_depth;
		if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
			skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;

		skdev->queue_low_water_mark =
			skdev->cur_max_queue_depth * 2 / 3 + 1;
		if (skdev->queue_low_water_mark < 1)
			skdev->queue_low_water_mark = 1;
		dev_info(&skdev->pdev->dev,
			 "Queue depth limit=%d dev=%d lowat=%d\n",
			 skdev->cur_max_queue_depth,
			 skdev->dev_max_queue_depth,
			 skdev->queue_low_water_mark);

		skd_refresh_device_data(skdev);
		break;

	case FIT_SR_DRIVE_BUSY:
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		skd_quiesce_dev(skdev);
		break;
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		/* set timer for 3 seconds, we'll abort any unfinished
		 * commands after that expires
		 */
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_TIMER_SECONDS(3);
		blk_start_queue(skdev->queue);
		break;
	case FIT_SR_DRIVE_BUSY_ERASE:
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		break;
	case FIT_SR_DRIVE_OFFLINE:
		skdev->state = SKD_DRVR_STATE_IDLE;
		break;
	case FIT_SR_DRIVE_SOFT_RESET:
		switch (skdev->state) {
		case SKD_DRVR_STATE_STARTING:
		case SKD_DRVR_STATE_RESTARTING:
			/* Expected by a caller of skd_soft_reset() */
			break;
		default:
			skdev->state = SKD_DRVR_STATE_RESTARTING;
			break;
		}
		break;
	case FIT_SR_DRIVE_FW_BOOTING:
		dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
		break;

	case FIT_SR_DRIVE_DEGRADED:
	case FIT_SR_PCIE_LINK_DOWN:
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		break;

	case FIT_SR_DRIVE_FAULT:
		skd_drive_fault(skdev);
		skd_recover_requests(skdev);
		blk_start_queue(skdev->queue);
		break;

	/* PCIe bus returned all Fs? */
	case 0xFF:
		dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
			 sense);
		skd_drive_disappeared(skdev);
		skd_recover_requests(skdev);
		blk_start_queue(skdev->queue);
		break;
	default:
		/*
		 * Unknown FW State. Wait for a state we recognize.
		 */
		break;
	}
	dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
		skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
}
static void skd_recover_requests(struct skd_device *skdev)
{
	int i;

	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq = &skdev->skreq_table[i];
		struct request *req = skreq->req;

		if (skreq->state == SKD_REQ_STATE_BUSY) {
			skd_log_skreq(skdev, skreq, "recover");

			SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
			SKD_ASSERT(req != NULL);

			/* Release DMA resources for the request. */
			if (skreq->n_sg > 0)
				skd_postop_sg_list(skdev, skreq);

			skreq->req = NULL;

			skreq->state = SKD_REQ_STATE_IDLE;
			skreq->id += SKD_ID_INCR;

			skd_end_request(skdev, req, BLK_STS_IOERR);
		}
	}

	for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
		atomic_set(&skdev->timeout_slot[i], 0);

	atomic_set(&skdev->in_flight, 0);
}
static void skd_isr_msg_from_dev(struct skd_device *skdev)
{
	u32 mfd;
	u32 mtd;
	u32 data;

	mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);

	dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
		skdev->last_mtd);

	/* ignore any mtd that is an ack for something we didn't send */
	if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
		return;

	switch (FIT_MXD_TYPE(mfd)) {
	case FIT_MTD_FITFW_INIT:
		skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);

		if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
			dev_err(&skdev->pdev->dev, "protocol mismatch\n");
			dev_err(&skdev->pdev->dev, "  got=%d support=%d\n",
				skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
			dev_err(&skdev->pdev->dev, "  please upgrade driver\n");
			skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_GET_CMDQ_DEPTH:
		skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
				   SKD_N_COMPLETION_ENTRY);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_DEPTH:
		SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_ADDR:
		skd_reset_skcomp(skdev);
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_HOST_ID:
		skdev->connect_time_stamp = get_seconds();
		data = skdev->connect_time_stamp & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
		skdev->drive_jiffies = FIT_MXD_DATA(mfd);
		data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
		skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
		mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;

		dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
			skdev->connect_time_stamp, skdev->drive_jiffies);
		break;

	case FIT_MTD_ARM_QUEUE:
		skdev->last_mtd = 0;
		/*
		 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
		 */
		break;

	default:
		break;
	}
}
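/*
 * The exchange above is a strict request/ack ladder: each case sends the
 * next FIT_MTD_* message and records it in last_mtd, and any message from
 * the device that does not ack last_mtd is ignored. The full bring-up
 * sequence is FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
 * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO/HI ->
 * ARM_QUEUE, after which the drive is expected to go online.
 */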
static void skd_disable_interrupts(struct skd_device *skdev)
{
	u32 sense;

	sense = SKD_READL(skdev, FIT_CONTROL);
	sense &= ~FIT_CR_ENABLE_INTERRUPTS;
	SKD_WRITEL(skdev, sense, FIT_CONTROL);
	dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);

	/* Note that all 1s are written. A 1-bit means
	 * disable, a 0 means enable.
	 */
	SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
}
static void skd_enable_interrupts(struct skd_device *skdev)
{
	u32 val;

	/* unmask interrupts first */
	val = FIT_ISH_FW_STATE_CHANGE +
	      FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;

	/* Note that the complement of mask is written. A 1-bit means
	 * disable, a 0 means enable.
	 */
	SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
	dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= FIT_CR_ENABLE_INTERRUPTS;
	dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}
/*
 *****************************************************************************
 * START, STOP, RESTART, QUIESCE, UNQUIESCE
 *****************************************************************************
 */
static void skd_soft_reset(struct skd_device *skdev)
{
        u32 val;

        val = SKD_READL(skdev, FIT_CONTROL);
        val |= (FIT_CR_SOFT_RESET);
        dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
        SKD_WRITEL(skdev, val, FIT_CONTROL);
}
static void skd_start_device(struct skd_device *skdev)
{
        unsigned long flags;
        u32 sense;
        u32 state;

        spin_lock_irqsave(&skdev->lock, flags);

        /* ack all ghost interrupts */
        SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

        sense = SKD_READL(skdev, FIT_STATUS);

        dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);

        state = sense & FIT_SR_DRIVE_STATE_MASK;
        skdev->drive_state = state;
        skdev->last_mtd = 0;

        skdev->state = SKD_DRVR_STATE_STARTING;
        skdev->timer_countdown = SKD_STARTING_TIMO;

        skd_enable_interrupts(skdev);

        switch (skdev->drive_state) {
        case FIT_SR_DRIVE_OFFLINE:
                dev_err(&skdev->pdev->dev, "Drive offline...\n");
                break;

        case FIT_SR_DRIVE_FW_BOOTING:
                dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
                skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
                skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
                break;

        case FIT_SR_DRIVE_BUSY_SANITIZE:
                dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
                skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
                skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
                break;

        case FIT_SR_DRIVE_BUSY_ERASE:
                dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
                skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
                skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
                break;

        case FIT_SR_DRIVE_INIT:
        case FIT_SR_DRIVE_ONLINE:
                skd_soft_reset(skdev);
                break;

        case FIT_SR_DRIVE_BUSY:
                dev_err(&skdev->pdev->dev, "Drive Busy...\n");
                skdev->state = SKD_DRVR_STATE_BUSY;
                skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
                break;

        case FIT_SR_DRIVE_SOFT_RESET:
                dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
                break;

        case FIT_SR_DRIVE_FAULT:
                /* Fault state is bad...soft reset won't do it...
                 * Hard reset, maybe, but does it work on device?
                 * For now, just fault so the system doesn't hang.
                 */
                skd_drive_fault(skdev);
                /* start the queue so we can respond with error to requests */
                dev_dbg(&skdev->pdev->dev, "starting queue\n");
                blk_start_queue(skdev->queue);
                skdev->gendisk_on = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        case 0xFF:
                /* Most likely the device isn't there or isn't responding
                 * to the BAR1 addresses. */
                skd_drive_disappeared(skdev);
                /* start the queue so we can respond with error to requests */
                dev_dbg(&skdev->pdev->dev,
                        "starting queue to error-out reqs\n");
                blk_start_queue(skdev->queue);
                skdev->gendisk_on = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        default:
                dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
                        skdev->drive_state);
                break;
        }

        state = SKD_READL(skdev, FIT_CONTROL);
        dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);

        state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
        dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);

        state = SKD_READL(skdev, FIT_INT_MASK_HOST);
        dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);

        state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
        dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);

        state = SKD_READL(skdev, FIT_HW_VERSION);
        dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);

        spin_unlock_irqrestore(&skdev->lock, flags);
}
static void skd_stop_device(struct skd_device *skdev)
{
        unsigned long flags;
        struct skd_special_context *skspcl = &skdev->internal_skspcl;
        u32 dev_state;
        int i;

        spin_lock_irqsave(&skdev->lock, flags);

        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
                dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
                goto stop_out;
        }

        if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
                dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
                goto stop_out;
        }

        skdev->state = SKD_DRVR_STATE_SYNCING;
        skdev->sync_done = 0;

        skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);

        spin_unlock_irqrestore(&skdev->lock, flags);

        wait_event_interruptible_timeout(skdev->waitq,
                                         (skdev->sync_done), (10 * HZ));

        spin_lock_irqsave(&skdev->lock, flags);

        switch (skdev->sync_done) {
        case 0:
                dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
                break;
        case 1:
                dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
                break;
        default:
                dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
        }

stop_out:
        skdev->state = SKD_DRVR_STATE_STOPPING;
        spin_unlock_irqrestore(&skdev->lock, flags);

        skd_kill_timer(skdev);

        spin_lock_irqsave(&skdev->lock, flags);
        skd_disable_interrupts(skdev);

        /* ensure all ints on device are cleared */
        /* soft reset the device to unload with a clean slate */
        SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
        SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);

        spin_unlock_irqrestore(&skdev->lock, flags);

        /* poll every 100ms, 1 second timeout */
        for (i = 0; i < 10; i++) {
                dev_state =
                        SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
                if (dev_state == FIT_SR_DRIVE_INIT)
                        break;
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(100));
        }

        if (dev_state != FIT_SR_DRIVE_INIT)
                dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
                        dev_state);
}
/* assume spinlock is held */
static void skd_restart_device(struct skd_device *skdev)
{
        u32 state;

        /* ack all ghost interrupts */
        SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

        state = SKD_READL(skdev, FIT_STATUS);

        dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);

        state &= FIT_SR_DRIVE_STATE_MASK;
        skdev->drive_state = state;
        skdev->last_mtd = 0;

        skdev->state = SKD_DRVR_STATE_RESTARTING;
        skdev->timer_countdown = SKD_RESTARTING_TIMO;

        skd_soft_reset(skdev);
}
/* assume spinlock is held */
static int skd_quiesce_dev(struct skd_device *skdev)
{
        int rc = 0;

        switch (skdev->state) {
        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
                dev_dbg(&skdev->pdev->dev, "stopping queue\n");
                blk_stop_queue(skdev->queue);
                break;
        case SKD_DRVR_STATE_ONLINE:
        case SKD_DRVR_STATE_STOPPING:
        case SKD_DRVR_STATE_SYNCING:
        case SKD_DRVR_STATE_PAUSING:
        case SKD_DRVR_STATE_PAUSED:
        case SKD_DRVR_STATE_STARTING:
        case SKD_DRVR_STATE_RESTARTING:
        case SKD_DRVR_STATE_RESUMING:
        default:
                rc = -EINVAL;
                dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
                        skdev->state);
        }
        return rc;
}
/* assume spinlock is held */
static int skd_unquiesce_dev(struct skd_device *skdev)
{
        int prev_driver_state = skdev->state;

        skd_log_skdev(skdev, "unquiesce");
        if (skdev->state == SKD_DRVR_STATE_ONLINE) {
                dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
                return 0;
        }
        if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
                /*
                 * If there has been a state change to other than
                 * ONLINE, we will rely on controller state change
                 * to come back online and restart the queue.
                 * The BUSY state means that the driver is ready to
                 * continue normal processing but waiting for the controller
                 * to become available.
                 */
                skdev->state = SKD_DRVR_STATE_BUSY;
                dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
                return 0;
        }

        /*
         * Drive has just come online, driver is either in startup,
         * paused performing a task, or busy waiting for hardware.
         */
        switch (skdev->state) {
        case SKD_DRVR_STATE_PAUSED:
        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
        case SKD_DRVR_STATE_BUSY_ERASE:
        case SKD_DRVR_STATE_STARTING:
        case SKD_DRVR_STATE_RESTARTING:
        case SKD_DRVR_STATE_FAULT:
        case SKD_DRVR_STATE_IDLE:
        case SKD_DRVR_STATE_LOAD:
                skdev->state = SKD_DRVR_STATE_ONLINE;
                dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
                        skd_skdev_state_to_str(prev_driver_state),
                        prev_driver_state, skd_skdev_state_to_str(skdev->state),
                        skdev->state);
                dev_dbg(&skdev->pdev->dev,
                        "**** device ONLINE...starting block queue\n");
                dev_dbg(&skdev->pdev->dev, "starting queue\n");
                dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
                blk_start_queue(skdev->queue);
                skdev->gendisk_on = 1;
                wake_up_interruptible(&skdev->waitq);
                break;

        case SKD_DRVR_STATE_DISAPPEARED:
        default:
                dev_dbg(&skdev->pdev->dev,
                        "**** driver state %d, not implemented\n",
                        skdev->state);
                return -EBUSY;
        }
        return 0;
}
/*
 *****************************************************************************
 * PCIe MSI/MSI-X INTERRUPT HANDLERS
 *****************************************************************************
 */
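/*
 * One handler per MSI-X vector. Each one acknowledges its own bit(s) in
 * FIT_INT_STATUS_HOST under skdev->lock before doing any further work.
 */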
static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
{
        struct skd_device *skdev = skd_host_data;
        unsigned long flags;

        spin_lock_irqsave(&skdev->lock, flags);
        dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
                SKD_READL(skdev, FIT_INT_STATUS_HOST));
        dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
                SKD_READL(skdev, FIT_INT_STATUS_HOST));
        SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
        spin_unlock_irqrestore(&skdev->lock, flags);

        return IRQ_HANDLED;
}
static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
{
        struct skd_device *skdev = skd_host_data;
        unsigned long flags;

        spin_lock_irqsave(&skdev->lock, flags);
        dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
                SKD_READL(skdev, FIT_INT_STATUS_HOST));
        SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
        skd_isr_fwstate(skdev);
        spin_unlock_irqrestore(&skdev->lock, flags);

        return IRQ_HANDLED;
}
static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
{
        struct skd_device *skdev = skd_host_data;
        unsigned long flags;
        int flush_enqueued = 0;
        int deferred;

        spin_lock_irqsave(&skdev->lock, flags);
        dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
                SKD_READL(skdev, FIT_INT_STATUS_HOST));
        SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
        deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
                                             &flush_enqueued);
        if (flush_enqueued)
                blk_run_queue_async(skdev->queue);

        if (deferred)
                schedule_work(&skdev->completion_worker);
        else if (!flush_enqueued)
                blk_run_queue_async(skdev->queue);

        spin_unlock_irqrestore(&skdev->lock, flags);

        return IRQ_HANDLED;
}
static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
{
        struct skd_device *skdev = skd_host_data;
        unsigned long flags;

        spin_lock_irqsave(&skdev->lock, flags);
        dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
                SKD_READL(skdev, FIT_INT_STATUS_HOST));
        SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
        skd_isr_msg_from_dev(skdev);
        spin_unlock_irqrestore(&skdev->lock, flags);

        return IRQ_HANDLED;
}
static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
{
        struct skd_device *skdev = skd_host_data;
        unsigned long flags;

        spin_lock_irqsave(&skdev->lock, flags);
        dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
                SKD_READL(skdev, FIT_INT_STATUS_HOST));
        SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
        spin_unlock_irqrestore(&skdev->lock, flags);

        return IRQ_HANDLED;
}
/*
 *****************************************************************************
 * PCIe MSI/MSI-X SETUP
 *****************************************************************************
 */
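/*
 * MSI-X vector layout, matching msix_entries[] below: four DMA vectors
 * (serviced as reserved), state change, completion queue, message from
 * device, two reserved vectors, and four queue-full vectors.
 */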
struct skd_msix_entry {
        char isr_name[30];
};

struct skd_init_msix_entry {
        const char *name;
        irq_handler_t handler;
};

#define SKD_MAX_MSIX_COUNT 13
#define SKD_MIN_MSIX_COUNT 7
#define SKD_BASE_MSIX_IRQ 4

static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
        { "(DMA 0)",        skd_reserved_isr },
        { "(DMA 1)",        skd_reserved_isr },
        { "(DMA 2)",        skd_reserved_isr },
        { "(DMA 3)",        skd_reserved_isr },
        { "(State Change)", skd_statec_isr   },
        { "(COMPL_Q)",      skd_comp_q       },
        { "(MSG)",          skd_msg_isr      },
        { "(Reserved)",     skd_reserved_isr },
        { "(Reserved)",     skd_reserved_isr },
        { "(Queue Full 0)", skd_qfull_isr    },
        { "(Queue Full 1)", skd_qfull_isr    },
        { "(Queue Full 2)", skd_qfull_isr    },
        { "(Queue Full 3)", skd_qfull_isr    },
};
static int skd_acquire_msix(struct skd_device *skdev)
{
        int i, rc;
        struct pci_dev *pdev = skdev->pdev;

        rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
                                   PCI_IRQ_MSIX);
        if (rc < 0) {
                dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
                goto out;
        }

        skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
                                      sizeof(struct skd_msix_entry), GFP_KERNEL);
        if (!skdev->msix_entries) {
                rc = -ENOMEM;
                dev_err(&skdev->pdev->dev, "msix table allocation error\n");
                goto out;
        }

        /* Enable MSI-X vectors for the base queue */
        for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
                struct skd_msix_entry *qentry = &skdev->msix_entries[i];

                snprintf(qentry->isr_name, sizeof(qentry->isr_name),
                         "%s%d-msix %s", DRV_NAME, skdev->devno,
                         msix_entries[i].name);

                rc = devm_request_irq(&skdev->pdev->dev,
                                      pci_irq_vector(skdev->pdev, i),
                                      msix_entries[i].handler, 0,
                                      qentry->isr_name, skdev);
                if (rc) {
                        dev_err(&skdev->pdev->dev,
                                "Unable to register(%d) MSI-X handler %d: %s\n",
                                rc, i, qentry->isr_name);
                        goto msix_out;
                }
        }

        dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
                SKD_MAX_MSIX_COUNT);
        return 0;

msix_out:
        while (--i >= 0)
                devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
out:
        kfree(skdev->msix_entries);
        skdev->msix_entries = NULL;
        return rc;
}
static int skd_acquire_irq(struct skd_device *skdev)
{
        struct pci_dev *pdev = skdev->pdev;
        unsigned int irq_flag = PCI_IRQ_LEGACY;
        int rc;

        if (skd_isr_type == SKD_IRQ_MSIX) {
                rc = skd_acquire_msix(skdev);
                if (!rc)
                        return 0;

                dev_err(&skdev->pdev->dev,
                        "failed to enable MSI-X, re-trying with MSI %d\n", rc);
        }

        snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
                 skdev->devno);

        if (skd_isr_type != SKD_IRQ_LEGACY)
                irq_flag |= PCI_IRQ_MSI;
        rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
        if (rc < 0) {
                dev_err(&skdev->pdev->dev,
                        "failed to allocate the MSI interrupt %d\n", rc);
                return rc;
        }

        rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
                              pdev->msi_enabled ? 0 : IRQF_SHARED,
                              skdev->isr_name, skdev);
        if (rc) {
                pci_free_irq_vectors(pdev);
                dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
                        rc);
                return rc;
        }

        return 0;
}
static void skd_release_irq(struct skd_device *skdev)
{
        struct pci_dev *pdev = skdev->pdev;

        if (skdev->msix_entries) {
                int i;

                for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
                        devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
                                      skdev);
                }

                kfree(skdev->msix_entries);
                skdev->msix_entries = NULL;
        } else {
                devm_free_irq(&pdev->dev, pdev->irq, skdev);
        }

        pci_free_irq_vectors(pdev);
}
/*
 *****************************************************************************
 * CONSTRUCT
 *****************************************************************************
 */
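/*
 * The skd_cons_*() helpers below allocate the per-device DMA and table
 * structures; skd_construct() calls them in order and unwinds through
 * skd_destruct() if any step fails. Note that the completion ring and the
 * error table share a single coherent allocation of SKD_SKCOMP_SIZE bytes.
 */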
static int skd_cons_skcomp(struct skd_device *skdev)
{
        int rc = 0;
        struct fit_completion_entry_v1 *skcomp;

        dev_dbg(&skdev->pdev->dev,
                "comp pci_alloc, total bytes %zd entries %d\n",
                SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);

        skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
                                       &skdev->cq_dma_address);

        if (skcomp == NULL) {
                rc = -ENOMEM;
                goto err_out;
        }

        skdev->skcomp_table = skcomp;
        skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
                                                            sizeof(*skcomp) *
                                                            SKD_N_COMPLETION_ENTRY);

err_out:
        return rc;
}
static int skd_cons_skmsg(struct skd_device *skdev)
{
        int rc = 0;
        u32 i;

        dev_dbg(&skdev->pdev->dev,
                "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
                sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
                sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);

        skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
                                     sizeof(struct skd_fitmsg_context),
                                     GFP_KERNEL);
        if (skdev->skmsg_table == NULL) {
                rc = -ENOMEM;
                goto err_out;
        }

        for (i = 0; i < skdev->num_fitmsg_context; i++) {
                struct skd_fitmsg_context *skmsg;

                skmsg = &skdev->skmsg_table[i];

                skmsg->id = i + SKD_ID_FIT_MSG;

                skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
                                                      SKD_N_FITMSG_BYTES,
                                                      &skmsg->mb_dma_address);

                if (skmsg->msg_buf == NULL) {
                        rc = -ENOMEM;
                        goto err_out;
                }

                WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
                     (FIT_QCMD_ALIGN - 1),
                     "not aligned: msg_buf %p mb_dma_address %#llx\n",
                     skmsg->msg_buf, skmsg->mb_dma_address);
                memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
        }

err_out:
        return rc;
}
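/*
 * Build a DMA-coherent chain of FIT SG descriptors: each entry's
 * next_desc_ptr holds the bus address of the following descriptor, and
 * the final entry is terminated with 0.
 */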
static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
                                                  u32 n_sg,
                                                  dma_addr_t *ret_dma_addr)
{
        struct fit_sg_descriptor *sg_list;
        u32 nbytes;

        nbytes = sizeof(*sg_list) * n_sg;

        sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);

        if (sg_list != NULL) {
                uint64_t dma_address = *ret_dma_addr;
                u32 i;

                memset(sg_list, 0, nbytes);

                for (i = 0; i < n_sg - 1; i++) {
                        uint64_t ndp_off;
                        ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);

                        sg_list[i].next_desc_ptr = dma_address + ndp_off;
                }
                sg_list[i].next_desc_ptr = 0LL;
        }

        return sg_list;
}
static int skd_cons_skreq(struct skd_device *skdev)
{
        int rc = 0;
        u32 i;

        dev_dbg(&skdev->pdev->dev,
                "skreq_table kcalloc, struct %lu, count %u total %lu\n",
                sizeof(struct skd_request_context), skdev->num_req_context,
                sizeof(struct skd_request_context) * skdev->num_req_context);

        skdev->skreq_table = kcalloc(skdev->num_req_context,
                                     sizeof(struct skd_request_context),
                                     GFP_KERNEL);
        if (skdev->skreq_table == NULL) {
                rc = -ENOMEM;
                goto err_out;
        }

        dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
                skdev->sgs_per_request, sizeof(struct scatterlist),
                skdev->sgs_per_request * sizeof(struct scatterlist));

        for (i = 0; i < skdev->num_req_context; i++) {
                struct skd_request_context *skreq;

                skreq = &skdev->skreq_table[i];

                skreq->state = SKD_REQ_STATE_IDLE;

                skreq->sg = kcalloc(skdev->sgs_per_request,
                                    sizeof(struct scatterlist), GFP_KERNEL);
                if (skreq->sg == NULL) {
                        rc = -ENOMEM;
                        goto err_out;
                }
                sg_init_table(skreq->sg, skdev->sgs_per_request);

                skreq->sksg_list = skd_cons_sg_list(skdev,
                                                    skdev->sgs_per_request,
                                                    &skreq->sksg_dma_address);

                if (skreq->sksg_list == NULL) {
                        rc = -ENOMEM;
                        goto err_out;
                }
        }

err_out:
        return rc;
}
static int skd_cons_sksb(struct skd_device *skdev)
{
        int rc = 0;
        struct skd_special_context *skspcl;
        u32 nbytes;

        skspcl = &skdev->internal_skspcl;

        skspcl->req.id = 0 + SKD_ID_INTERNAL;
        skspcl->req.state = SKD_REQ_STATE_IDLE;

        nbytes = SKD_N_INTERNAL_BYTES;

        skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
                                                 &skspcl->db_dma_address);
        if (skspcl->data_buf == NULL) {
                rc = -ENOMEM;
                goto err_out;
        }

        nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
        skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
                                                &skspcl->mb_dma_address);
        if (skspcl->msg_buf == NULL) {
                rc = -ENOMEM;
                goto err_out;
        }

        skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
                                                 &skspcl->req.sksg_dma_address);
        if (skspcl->req.sksg_list == NULL) {
                rc = -ENOMEM;
                goto err_out;
        }

        if (!skd_format_internal_skspcl(skdev)) {
                rc = -EINVAL;
                goto err_out;
        }

err_out:
        return rc;
}
static int skd_cons_disk(struct skd_device *skdev)
{
        int rc = 0;
        struct gendisk *disk;
        struct request_queue *q;
        unsigned long flags;

        disk = alloc_disk(SKD_MINORS_PER_DEVICE);
        if (!disk) {
                rc = -ENOMEM;
                goto err_out;
        }

        skdev->disk = disk;
        sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);

        disk->major = skdev->major;
        disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
        disk->fops = &skd_blockdev_ops;
        disk->private_data = skdev;

        q = blk_init_queue(skd_request_fn, &skdev->lock);
        if (!q) {
                rc = -ENOMEM;
                goto err_out;
        }
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
        q->nr_requests = skd_max_queue_depth / 2;
        blk_queue_init_tags(q, skd_max_queue_depth, NULL, BLK_TAG_ALLOC_FIFO);

        skdev->queue = q;
        disk->queue = q;
        q->queuedata = skdev;

        blk_queue_write_cache(q, true, true);
        blk_queue_max_segments(q, skdev->sgs_per_request);
        blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);

        /* set optimal I/O size to 8KB */
        blk_queue_io_opt(q, 8192);

        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

        spin_lock_irqsave(&skdev->lock, flags);
        dev_dbg(&skdev->pdev->dev, "stopping queue\n");
        blk_stop_queue(skdev->queue);
        spin_unlock_irqrestore(&skdev->lock, flags);

err_out:
        return rc;
}
#define SKD_N_DEV_TABLE 16u
static u32 skd_next_devno;

static struct skd_device *skd_construct(struct pci_dev *pdev)
{
        struct skd_device *skdev;
        int blk_major = skd_major;
        int rc;

        skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);

        if (!skdev) {
                dev_err(&pdev->dev, "memory alloc failure\n");
                return NULL;
        }

        skdev->state = SKD_DRVR_STATE_LOAD;
        skdev->pdev = pdev;
        skdev->devno = skd_next_devno++;
        skdev->major = blk_major;
        skdev->dev_max_queue_depth = 0;

        skdev->num_req_context = skd_max_queue_depth;
        skdev->num_fitmsg_context = skd_max_queue_depth;
        skdev->cur_max_queue_depth = 1;
        skdev->queue_low_water_mark = 1;
        skdev->proto_ver = 99;
        skdev->sgs_per_request = skd_sgs_per_request;
        skdev->dbg_level = skd_dbg_level;

        spin_lock_init(&skdev->lock);

        INIT_WORK(&skdev->completion_worker, skd_completion_worker);

        dev_dbg(&skdev->pdev->dev, "skcomp\n");
        rc = skd_cons_skcomp(skdev);
        if (rc < 0)
                goto err_out;

        dev_dbg(&skdev->pdev->dev, "skmsg\n");
        rc = skd_cons_skmsg(skdev);
        if (rc < 0)
                goto err_out;

        dev_dbg(&skdev->pdev->dev, "skreq\n");
        rc = skd_cons_skreq(skdev);
        if (rc < 0)
                goto err_out;

        dev_dbg(&skdev->pdev->dev, "sksb\n");
        rc = skd_cons_sksb(skdev);
        if (rc < 0)
                goto err_out;

        dev_dbg(&skdev->pdev->dev, "disk\n");
        rc = skd_cons_disk(skdev);
        if (rc < 0)
                goto err_out;

        dev_dbg(&skdev->pdev->dev, "VICTORY\n");
        return skdev;

err_out:
        dev_dbg(&skdev->pdev->dev, "construct failed\n");
        skd_destruct(skdev);
        return NULL;
}
/*
 *****************************************************************************
 * DESTRUCT (FREE)
 *****************************************************************************
 */
static void skd_free_skcomp(struct skd_device *skdev)
{
        if (skdev->skcomp_table)
                pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
                                    skdev->skcomp_table, skdev->cq_dma_address);

        skdev->skcomp_table = NULL;
        skdev->cq_dma_address = 0;
}
static void skd_free_skmsg(struct skd_device *skdev)
{
        u32 i;

        if (skdev->skmsg_table == NULL)
                return;

        for (i = 0; i < skdev->num_fitmsg_context; i++) {
                struct skd_fitmsg_context *skmsg;

                skmsg = &skdev->skmsg_table[i];

                if (skmsg->msg_buf != NULL) {
                        pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
                                            skmsg->msg_buf,
                                            skmsg->mb_dma_address);
                }
                skmsg->msg_buf = NULL;
                skmsg->mb_dma_address = 0;
        }

        kfree(skdev->skmsg_table);
        skdev->skmsg_table = NULL;
}
static void skd_free_sg_list(struct skd_device *skdev,
                             struct fit_sg_descriptor *sg_list,
                             u32 n_sg, dma_addr_t dma_addr)
{
        if (sg_list != NULL) {
                u32 nbytes;

                nbytes = sizeof(*sg_list) * n_sg;

                pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
        }
}
static void skd_free_skreq(struct skd_device *skdev)
{
        u32 i;

        if (skdev->skreq_table == NULL)
                return;

        for (i = 0; i < skdev->num_req_context; i++) {
                struct skd_request_context *skreq;

                skreq = &skdev->skreq_table[i];

                skd_free_sg_list(skdev, skreq->sksg_list,
                                 skdev->sgs_per_request,
                                 skreq->sksg_dma_address);

                skreq->sksg_list = NULL;
                skreq->sksg_dma_address = 0;

                kfree(skreq->sg);
        }

        kfree(skdev->skreq_table);
        skdev->skreq_table = NULL;
}
static void skd_free_sksb(struct skd_device *skdev)
{
        struct skd_special_context *skspcl;
        u32 nbytes;

        skspcl = &skdev->internal_skspcl;

        if (skspcl->data_buf != NULL) {
                nbytes = SKD_N_INTERNAL_BYTES;

                pci_free_consistent(skdev->pdev, nbytes,
                                    skspcl->data_buf, skspcl->db_dma_address);
        }

        skspcl->data_buf = NULL;
        skspcl->db_dma_address = 0;

        if (skspcl->msg_buf != NULL) {
                nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
                pci_free_consistent(skdev->pdev, nbytes,
                                    skspcl->msg_buf, skspcl->mb_dma_address);
        }

        skspcl->msg_buf = NULL;
        skspcl->mb_dma_address = 0;

        skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
                         skspcl->req.sksg_dma_address);

        skspcl->req.sksg_list = NULL;
        skspcl->req.sksg_dma_address = 0;
}
static void skd_free_disk(struct skd_device *skdev)
{
        struct gendisk *disk = skdev->disk;

        if (disk && (disk->flags & GENHD_FL_UP))
                del_gendisk(disk);

        if (skdev->queue) {
                blk_cleanup_queue(skdev->queue);
                skdev->queue = NULL;
                disk->queue = NULL;
        }

        put_disk(disk);
        skdev->disk = NULL;
}
static void skd_destruct(struct skd_device *skdev)
{
        if (skdev == NULL)
                return;

        dev_dbg(&skdev->pdev->dev, "disk\n");
        skd_free_disk(skdev);

        dev_dbg(&skdev->pdev->dev, "sksb\n");
        skd_free_sksb(skdev);

        dev_dbg(&skdev->pdev->dev, "skreq\n");
        skd_free_skreq(skdev);

        dev_dbg(&skdev->pdev->dev, "skmsg\n");
        skd_free_skmsg(skdev);

        dev_dbg(&skdev->pdev->dev, "skcomp\n");
        skd_free_skcomp(skdev);

        dev_dbg(&skdev->pdev->dev, "skdev\n");
        kfree(skdev);
}
/*
 *****************************************************************************
 * BLOCK DEVICE (BDEV) GLUE
 *****************************************************************************
 */
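/*
 * HDIO_GETGEO support: the s1120 has no physical geometry, so report the
 * conventional 255 sectors x 64 heads and derive the cylinder count from
 * the capacity.
 */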
static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct skd_device *skdev;
        u64 capacity;

        skdev = bdev->bd_disk->private_data;

        dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
                bdev->bd_disk->disk_name, current->comm);

        if (skdev->read_cap_is_valid) {
                capacity = get_capacity(skdev->disk);
                geo->heads = 64;
                geo->sectors = 255;
                geo->cylinders = (capacity) / (255 * 64);
                return 0;
        }
        return -EIO;
}
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
        dev_dbg(&skdev->pdev->dev, "add_disk\n");
        device_add_disk(parent, skdev->disk);
        return 0;
}
static const struct block_device_operations skd_blockdev_ops = {
        .owner          = THIS_MODULE,
        .getgeo         = skd_bdev_getgeo,
};
/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */
static const struct pci_device_id skd_pci_tbl[] = {
        { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
        { 0 }                   /* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
static char *skd_pci_info(struct skd_device *skdev, char *str)
{
        int pcie_reg;

        strcpy(str, "PCIe (");
        pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

        if (pcie_reg) {
                char lwstr[6];
                uint16_t pcie_lstat, lspeed, lwidth;

                pcie_reg += 0x12;
                pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
                lspeed = pcie_lstat & (0xF);
                lwidth = (pcie_lstat & 0x3F0) >> 4;

                if (lspeed == 1)
                        strcat(str, "2.5GT/s ");
                else if (lspeed == 2)
                        strcat(str, "5.0GT/s ");
                else
                        strcat(str, "<unknown> ");
                snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
                strcat(str, lwstr);
        }
        return str;
}
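/*
 * Probe sequence: enable the PCI device, set a 64-bit DMA mask (with a
 * 32-bit fallback), construct the skd_device, map the BARs, acquire
 * interrupts, start the device, and wait up to SKD_START_WAIT_SECONDS for
 * the disk to come online before attaching the gendisk.
 */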
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int i;
        int rc = 0;
        char pci_str[32];
        struct skd_device *skdev;

        dev_info(&pdev->dev, "STEC s1120 Driver(%s) version %s-b%s\n",
                 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
        dev_info(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
                 pdev->device);

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;
        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out;
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!rc) {
                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
                        dev_err(&pdev->dev, "consistent DMA mask error %d\n",
                                rc);
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_err(&pdev->dev, "DMA mask error %d\n", rc);
                        goto err_out_regions;
                }
        }

        if (!skd_major) {
                rc = register_blkdev(0, DRV_NAME);
                if (rc < 0)
                        goto err_out_regions;
                BUG_ON(!rc);
                skd_major = rc;
        }

        skdev = skd_construct(pdev);
        if (skdev == NULL) {
                rc = -ENOMEM;
                goto err_out_regions;
        }

        skd_pci_info(skdev, pci_str);
        dev_info(&pdev->dev, "%s 64bit\n", pci_str);

        pci_set_master(pdev);
        rc = pci_enable_pcie_error_reporting(pdev);
        if (rc) {
                dev_err(&pdev->dev,
                        "bad enable of PCIe error reporting rc=%d\n", rc);
                skdev->pcie_error_reporting_is_enabled = 0;
        } else
                skdev->pcie_error_reporting_is_enabled = 1;

        pci_set_drvdata(pdev, skdev);

        for (i = 0; i < SKD_MAX_BARS; i++) {
                skdev->mem_phys[i] = pci_resource_start(pdev, i);
                skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
                skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
                                            skdev->mem_size[i]);
                if (!skdev->mem_map[i]) {
                        dev_err(&pdev->dev,
                                "Unable to map adapter memory!\n");
                        rc = -ENODEV;
                        goto err_out_iounmap;
                }
                dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
                        skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
                        skdev->mem_size[i]);
        }

        rc = skd_acquire_irq(skdev);
        if (rc) {
                dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
                goto err_out_iounmap;
        }

        rc = skd_start_timer(skdev);
        if (rc)
                goto err_out_timer;

        init_waitqueue_head(&skdev->waitq);

        skd_start_device(skdev);

        rc = wait_event_interruptible_timeout(skdev->waitq,
                                              (skdev->gendisk_on),
                                              (SKD_START_WAIT_SECONDS * HZ));
        if (skdev->gendisk_on > 0) {
                /* device came on-line after reset */
                skd_bdev_attach(&pdev->dev, skdev);
                rc = 0;
        } else {
                /* we timed out, something is wrong with the device,
                   don't add the disk structure */
                dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
                        rc);
                /* in case of no error; we timeout with ENXIO */
                if (!rc)
                        rc = -ENXIO;
                goto err_out_timer;
        }

        return rc;

err_out_timer:
        skd_stop_device(skdev);
        skd_release_irq(skdev);

err_out_iounmap:
        for (i = 0; i < SKD_MAX_BARS; i++)
                if (skdev->mem_map[i])
                        iounmap(skdev->mem_map[i]);

        if (skdev->pcie_error_reporting_is_enabled)
                pci_disable_pcie_error_reporting(pdev);

        skd_destruct(skdev);

err_out_regions:
        pci_release_regions(pdev);

err_out:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return rc;
}
static void skd_pci_remove(struct pci_dev *pdev)
{
        int i;
        struct skd_device *skdev;

        skdev = pci_get_drvdata(pdev);
        if (skdev == NULL) {
                dev_err(&pdev->dev, "no device data for PCI\n");
                return;
        }
        skd_stop_device(skdev);
        skd_release_irq(skdev);

        for (i = 0; i < SKD_MAX_BARS; i++)
                if (skdev->mem_map[i])
                        iounmap(skdev->mem_map[i]);

        if (skdev->pcie_error_reporting_is_enabled)
                pci_disable_pcie_error_reporting(pdev);

        skd_destruct(skdev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        int i;
        struct skd_device *skdev;

        skdev = pci_get_drvdata(pdev);
        if (skdev == NULL) {
                dev_err(&pdev->dev, "no device data for PCI\n");
                return -EIO;
        }

        skd_stop_device(skdev);

        skd_release_irq(skdev);

        for (i = 0; i < SKD_MAX_BARS; i++)
                if (skdev->mem_map[i])
                        iounmap(skdev->mem_map[i]);

        if (skdev->pcie_error_reporting_is_enabled)
                pci_disable_pcie_error_reporting(pdev);

        pci_release_regions(pdev);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}
static int skd_pci_resume(struct pci_dev *pdev)
{
        int i;
        int rc = 0;
        struct skd_device *skdev;

        skdev = pci_get_drvdata(pdev);
        if (skdev == NULL) {
                dev_err(&pdev->dev, "no device data for PCI\n");
                return -1;
        }

        pci_set_power_state(pdev, PCI_D0);
        pci_enable_wake(pdev, PCI_D0, 0);
        pci_restore_state(pdev);

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;
        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out;
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!rc) {
                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
                        dev_err(&pdev->dev, "consistent DMA mask error %d\n",
                                rc);
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_err(&pdev->dev, "DMA mask error %d\n", rc);
                        goto err_out_regions;
                }
        }

        pci_set_master(pdev);
        rc = pci_enable_pcie_error_reporting(pdev);
        if (rc) {
                dev_err(&pdev->dev,
                        "bad enable of PCIe error reporting rc=%d\n", rc);
                skdev->pcie_error_reporting_is_enabled = 0;
        } else
                skdev->pcie_error_reporting_is_enabled = 1;

        for (i = 0; i < SKD_MAX_BARS; i++) {
                skdev->mem_phys[i] = pci_resource_start(pdev, i);
                skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
                skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
                                            skdev->mem_size[i]);
                if (!skdev->mem_map[i]) {
                        dev_err(&pdev->dev, "Unable to map adapter memory!\n");
                        rc = -ENODEV;
                        goto err_out_iounmap;
                }
                dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
                        skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
                        skdev->mem_size[i]);
        }
        rc = skd_acquire_irq(skdev);
        if (rc) {
                dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
                goto err_out_iounmap;
        }

        rc = skd_start_timer(skdev);
        if (rc)
                goto err_out_timer;

        init_waitqueue_head(&skdev->waitq);

        skd_start_device(skdev);

        return rc;

err_out_timer:
        skd_stop_device(skdev);
        skd_release_irq(skdev);

err_out_iounmap:
        for (i = 0; i < SKD_MAX_BARS; i++)
                if (skdev->mem_map[i])
                        iounmap(skdev->mem_map[i]);

        if (skdev->pcie_error_reporting_is_enabled)
                pci_disable_pcie_error_reporting(pdev);

err_out_regions:
        pci_release_regions(pdev);

err_out:
        pci_disable_device(pdev);
        return rc;
}
static void skd_pci_shutdown(struct pci_dev *pdev)
{
        struct skd_device *skdev;

        dev_err(&pdev->dev, "%s called\n", __func__);

        skdev = pci_get_drvdata(pdev);
        if (skdev == NULL) {
                dev_err(&pdev->dev, "no device data for PCI\n");
                return;
        }

        dev_err(&pdev->dev, "calling stop\n");
        skd_stop_device(skdev);
}
static struct pci_driver skd_driver = {
        .name           = DRV_NAME,
        .id_table       = skd_pci_tbl,
        .probe          = skd_pci_probe,
        .remove         = skd_pci_remove,
        .suspend        = skd_pci_suspend,
        .resume         = skd_pci_resume,
        .shutdown       = skd_pci_shutdown,
};
/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
const char *skd_drive_state_to_str(int state)
{
        switch (state) {
        case FIT_SR_DRIVE_OFFLINE:
                return "OFFLINE";
        case FIT_SR_DRIVE_INIT:
                return "INIT";
        case FIT_SR_DRIVE_ONLINE:
                return "ONLINE";
        case FIT_SR_DRIVE_BUSY:
                return "BUSY";
        case FIT_SR_DRIVE_FAULT:
                return "FAULT";
        case FIT_SR_DRIVE_DEGRADED:
                return "DEGRADED";
        case FIT_SR_PCIE_LINK_DOWN:
                return "LINK_DOWN";
        case FIT_SR_DRIVE_SOFT_RESET:
                return "SOFT_RESET";
        case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
                return "NEED_FW";
        case FIT_SR_DRIVE_INIT_FAULT:
                return "INIT_FAULT";
        case FIT_SR_DRIVE_BUSY_SANITIZE:
                return "BUSY_SANITIZE";
        case FIT_SR_DRIVE_BUSY_ERASE:
                return "BUSY_ERASE";
        case FIT_SR_DRIVE_FW_BOOTING:
                return "FW_BOOTING";
        default:
                return "???";
        }
}
const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
        switch (state) {
        case SKD_DRVR_STATE_LOAD:
                return "LOAD";
        case SKD_DRVR_STATE_IDLE:
                return "IDLE";
        case SKD_DRVR_STATE_BUSY:
                return "BUSY";
        case SKD_DRVR_STATE_STARTING:
                return "STARTING";
        case SKD_DRVR_STATE_ONLINE:
                return "ONLINE";
        case SKD_DRVR_STATE_PAUSING:
                return "PAUSING";
        case SKD_DRVR_STATE_PAUSED:
                return "PAUSED";
        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
                return "DRAINING_TIMEOUT";
        case SKD_DRVR_STATE_RESTARTING:
                return "RESTARTING";
        case SKD_DRVR_STATE_RESUMING:
                return "RESUMING";
        case SKD_DRVR_STATE_STOPPING:
                return "STOPPING";
        case SKD_DRVR_STATE_SYNCING:
                return "SYNCING";
        case SKD_DRVR_STATE_FAULT:
                return "FAULT";
        case SKD_DRVR_STATE_DISAPPEARED:
                return "DISAPPEARED";
        case SKD_DRVR_STATE_BUSY_ERASE:
                return "BUSY_ERASE";
        case SKD_DRVR_STATE_BUSY_SANITIZE:
                return "BUSY_SANITIZE";
        case SKD_DRVR_STATE_BUSY_IMMINENT:
                return "BUSY_IMMINENT";
        case SKD_DRVR_STATE_WAIT_BOOT:
                return "WAIT_BOOT";
        default:
                return "???";
        }
}
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
        switch (state) {
        case SKD_REQ_STATE_IDLE:
                return "IDLE";
        case SKD_REQ_STATE_SETUP:
                return "SETUP";
        case SKD_REQ_STATE_BUSY:
                return "BUSY";
        case SKD_REQ_STATE_COMPLETED:
                return "COMPLETED";
        case SKD_REQ_STATE_TIMEOUT:
                return "TIMEOUT";
        default:
                return "???";
        }
}
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
        dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
        dev_dbg(&skdev->pdev->dev, "  drive_state=%s(%d) driver_state=%s(%d)\n",
                skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
                skd_skdev_state_to_str(skdev->state), skdev->state);
        dev_dbg(&skdev->pdev->dev, "  busy=%d limit=%d dev=%d lowat=%d\n",
                atomic_read(&skdev->in_flight), skdev->cur_max_queue_depth,
                skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
        dev_dbg(&skdev->pdev->dev, "  timestamp=0x%x cycle=%d cycle_ix=%d\n",
                atomic_read(&skdev->timeout_stamp), skdev->skcomp_cycle,
                skdev->skcomp_ix);
}
static void skd_log_skreq(struct skd_device *skdev,
                          struct skd_request_context *skreq, const char *event)
{
        dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
        dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
                skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
                skreq->fitmsg_id);
        dev_dbg(&skdev->pdev->dev, "  timo=0x%x sg_dir=%d n_sg=%d\n",
                skreq->timeout_stamp, skreq->data_dir, skreq->n_sg);

        if (skreq->req != NULL) {
                struct request *req = skreq->req;
                u32 lba = (u32)blk_rq_pos(req);
                u32 count = blk_rq_sectors(req);

                dev_dbg(&skdev->pdev->dev,
                        "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
                        lba, lba, count, count, (int)rq_data_dir(req));
        } else
                dev_dbg(&skdev->pdev->dev, "req=NULL\n");
}
/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
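/*
 * The BUILD_BUG_ON()s in skd_init() pin the on-wire FIT structure layouts
 * at compile time; the module parameters are then range-checked before the
 * PCI driver is registered. As a purely illustrative example (values
 * hypothetical), the checked parameters can be set at load time:
 *
 *     modprobe skd skd_max_queue_depth=64 skd_isr_type=1
 */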
static int __init skd_init(void)
{
        BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
        BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
        BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
        BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
        BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
        BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
        BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
        BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);

        pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

        switch (skd_isr_type) {
        case SKD_IRQ_LEGACY:
        case SKD_IRQ_MSI:
        case SKD_IRQ_MSIX:
                break;
        default:
                pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
                       skd_isr_type, SKD_IRQ_DEFAULT);
                skd_isr_type = SKD_IRQ_DEFAULT;
        }

        if (skd_max_queue_depth < 1 ||
            skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
                pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
                       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
                skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
        }

        if (skd_max_req_per_msg < 1 ||
            skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
                pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
                       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
                skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
        }

        if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
                pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
                       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
                skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
        }

        if (skd_dbg_level < 0 || skd_dbg_level > 2) {
                pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
                       skd_dbg_level, 0);
                skd_dbg_level = 0;
        }

        if (skd_isr_comp_limit < 0) {
                pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
                       skd_isr_comp_limit, 0);
                skd_isr_comp_limit = 0;
        }

        return pci_register_driver(&skd_driver);
}
static void __exit skd_exit(void)
{
        pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

        pci_unregister_driver(&skd_driver);

        if (skd_major)
                unregister_blkdev(skd_major, DRV_NAME);
}
module_init(skd_init);
module_exit(skd_exit);