// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
 * was acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/stringify.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define PFX DRV_NAME ": "

MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");

#define PCI_VENDOR_ID_STEC	0x1B39
#define PCI_DEVICE_ID_S1120	0x0001

#define SKD_FUA_NV		(1 << 1)
#define SKD_MINORS_PER_DEVICE	16

#define SKD_MAX_QUEUE_DEPTH	200u

#define SKD_PAUSE_TIMEOUT	(5 * 1000)

#define SKD_N_FITMSG_BYTES	(512u)
#define SKD_MAX_REQ_PER_MSG	14

#define SKD_N_SPECIAL_FITMSG_BYTES	(128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u

#define SKD_N_COMPLETION_ENTRY		256u
#define SKD_N_READ_CAP_BYTES		(8u)

#define SKD_N_INTERNAL_BYTES		(512u)

#define SKD_SKCOMP_SIZE \
	((sizeof(struct fit_completion_entry_v1) + \
	  sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_TABLE_MASK		(3u << 8u)
#define SKD_ID_RW_REQUEST		(0u << 8u)
#define SKD_ID_INTERNAL			(1u << 8u)
#define SKD_ID_FIT_MSG			(3u << 8u)
#define SKD_ID_SLOT_MASK		0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK	0x03FFu
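/*
 * Example: blk-mq tag 5 on the r/w table gives id = SKD_ID_RW_REQUEST | 5
 * = 0x0005; the table is recovered with (id & SKD_ID_TABLE_MASK) and the
 * slot with (id & SKD_ID_SLOT_MASK).
 */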

#define SKD_N_MAX_SECTORS	2048u

#define SKD_MAX_RETRIES		2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36

enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO	SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO	SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO	SKD_TIMER_MINUTES(4u)
#define SKD_BUSY_TIMO		SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO	SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS	90u

enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
};

enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};

struct skd_msg_buf {
	struct fit_msg_hdr	fmh;
	struct skd_scsi_request	scsi[SKD_MAX_REQ_PER_MSG];
};

struct skd_fitmsg_context {
	u32 id;

	u32 length;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_request_context {
	enum skd_req_state state;

	u16 id;
	u32 fitmsg_id;

	u8 flush_cmd;

	enum dma_data_direction data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;
	int retries;

	blk_status_t status;
};

struct skd_special_context {
	struct skd_request_context req;

	void *data_buf;
	dma_addr_t db_dma_address;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};

typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2

struct skd_device {
	void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct request_queue *queue;
	struct skd_fitmsg_context *skmsg;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	u32 devno;
	u32 major;
	char isr_name[30];

	enum skd_drvr_state state;
	u32 drive_state;

	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	struct skd_fitmsg_context *skmsg_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13]; /* 12 chars plus null term */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct kmem_cache *msgbuf_cache;
	struct kmem_cache *sglist_cache;
	struct kmem_cache *databuf_cache;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;

	int sgs_per_request;
	u32 last_mtd;

	u32 proto_ver;

	int dbg_level;
	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	u32 timo_slot;

	struct work_struct start_queue;
	struct work_struct completion_worker;
};

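/* Thin register accessors that can trace every access at dbg_level >= 2. */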
#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
	return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	writel(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	writeq(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
			val);
}

#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==2)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time. IGNORED");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
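/*
 * skd_in_flight() walks the blk-mq tag set with skd_inc_in_flight() to
 * count the requests the device is still working on.
 */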
static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
	int *count = data;

	(*count)++;
	return true;
}

static int skd_in_flight(struct skd_device *skdev)
{
	int count = 0;

	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);

	return count;
}

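/*
 * Build a 10-byte READ(10)/WRITE(10) CDB: big-endian 32-bit LBA in
 * bytes 2..5 and big-endian 16-bit transfer length in bytes 7..8.
 */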
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = READ_10;
	else
		scsi_req->cdb[0] = WRITE_10;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}

/*
 * Return true if and only if all pending requests should be failed.
 */
static bool skd_fail_all(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
		/* In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported the /dev/skd0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do??? delay /dev/skd0 ??
		 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		return false;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		return true;
	}
}

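/*
 * blk-mq .queue_rq callback: map the request into a FIT message, append
 * its SCSI CDB, and ring the doorbell; when skd_max_req_per_msg > 1,
 * several requests may be coalesced into one message under skdev->lock.
 */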
static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *mqd)
{
	struct request *const req = mqd->rq;
	struct request_queue *const q = req->q;
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg;
	struct fit_msg_hdr *fmh;
	const u32 tag = blk_mq_unique_tag(req);
	struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
	struct skd_scsi_request *scsi_req;
	unsigned long flags = 0;
	const u32 lba = blk_rq_pos(req);
	const u32 count = blk_rq_sectors(req);
	const int data_dir = rq_data_dir(req);

	if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
		return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		skreq->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	blk_mq_start_request(req);

	WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
		  tag, skd_max_queue_depth, q->nr_requests);

	SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);

	dev_dbg(&skdev->pdev->dev,
		"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
		lba, count, count, data_dir);

	skreq->id = tag + SKD_ID_RW_REQUEST;
	skreq->flush_cmd = 0;
	skreq->n_sg = 0;
	skreq->sg_byte_count = 0;

	skreq->fitmsg_id = 0;

	skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
		dev_dbg(&skdev->pdev->dev, "error Out\n");
		skreq->status = BLK_STS_RESOURCE;
		blk_mq_complete_request(req);
		return BLK_STS_OK;
	}

	dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
				   skreq->n_sg *
				   sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);

	/* Either a FIT msg is in progress or we have to start one. */
	if (skd_max_req_per_msg == 1) {
		skmsg = NULL;
	} else {
		spin_lock_irqsave(&skdev->lock, flags);
		skmsg = skdev->skmsg;
	}
	if (!skmsg) {
		skmsg = &skdev->skmsg_table[tag];
		skdev->skmsg = skmsg;

		/* Initialize the FIT msg header */
		fmh = &skmsg->msg_buf->fmh;
		memset(fmh, 0, sizeof(*fmh));
		fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
		skmsg->length = sizeof(*fmh);
	} else {
		fmh = &skmsg->msg_buf->fmh;
	}

	skreq->fitmsg_id = skmsg->id;

	scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
	memset(scsi_req, 0, sizeof(*scsi_req));

	scsi_req->hdr.tag = skreq->id;
	scsi_req->hdr.sg_list_dma_address =
		cpu_to_be64(skreq->sksg_dma_address);

	if (req_op(req) == REQ_OP_FLUSH) {
		skd_prep_zerosize_flush_cdb(scsi_req, skreq);
		SKD_ASSERT(skreq->flush_cmd == 1);
	} else {
		skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
	}

	if (req->cmd_flags & REQ_FUA)
		scsi_req->cdb[1] |= SKD_FUA_NV;

	scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);

	/* Complete resource allocations. */
	skreq->state = SKD_REQ_STATE_BUSY;

	skmsg->length += sizeof(struct skd_scsi_request);
	fmh->num_protocol_cmds_coalesced++;

	dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
		skd_in_flight(skdev));

	/*
	 * If the FIT msg buffer is full send it.
	 */
	if (skd_max_req_per_msg == 1) {
		skd_send_fitmsg(skdev, skmsg);
	} else {
		if (mqd->last ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skdev->skmsg = NULL;
		}
		spin_unlock_irqrestore(&skdev->lock, flags);
	}

	return BLK_STS_OK;
}

static enum blk_eh_timer_return skd_timed_out(struct request *req,
					      bool reserved)
{
	struct skd_device *skdev = req->q->queuedata;

	dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
		blk_mq_unique_tag(req));

	return BLK_EH_RESET_TIMER;
}

static void skd_complete_rq(struct request *req)
{
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, skreq->status);
}

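/*
 * Map the request's bios into skreq->sg and mirror that mapping into the
 * FIT SG descriptor list the device DMAs from. Returns false if mapping
 * fails.
 */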
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	struct scatterlist *sgl = &skreq->sg[0], *sg;
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
		     skreq->data_dir != DMA_FROM_DEVICE);

	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
	if (n_sg <= 0)
		return false;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for_each_sg(sgl, sg, n_sg, i) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(sg);
		uint64_t dma_addr = sg_dma_address(sg);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%pad\n",
			skreq->id, skreq->sksg_list, &skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return true;
}

static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
		     skreq->data_dir);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_start_queue(struct work_struct *work)
{
	struct skd_device *skdev = container_of(work, typeof(*skdev),
						start_queue);

	/*
	 * Although it is safe to call blk_start_queue() from interrupt
	 * context, blk_mq_start_hw_queues() must not be called from
	 * interrupt context.
	 */
	blk_mq_start_hw_queues(skdev->queue);
}

static void skd_timer_tick(struct timer_list *t)
{
	struct skd_device *skdev = from_timer(skdev, t, timer);
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE)
		skd_timer_tick_not_online(skdev);

	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}

static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		dev_dbg(&skdev->pdev->dev,
			"drive busy sanitize[%x], driver[%x]\n",
			skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get anymore completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
			skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		dev_dbg(&skdev->pdev->dev,
			"busy[%x], timedout=%d, restarting device.",
			skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;

		dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
			skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;
		dev_err(&skdev->pdev->dev,
			"DriveFault Reconnect Timeout (%x)\n",
			skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes. */
			skd_recover_requests(skdev);
		else {
			dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
				skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev);
		}

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}

static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	timer_setup(&skdev->timer, skd_timer_tick, 0);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}

/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */

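/*
 * Pre-format the dedicated internal special context: a one-command FIT
 * message plus a single SG descriptor pointing at the internal data
 * buffer.
 */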
static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = &skspcl->msg_buf->fmh;
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = &skspcl->msg_buf->scsi[0];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	skspcl->req.n_sg = 1;
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}

#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	skspcl->req.state = SKD_REQ_STATE_BUSY;

	scsi = &skspcl->msg_buf->scsi[0];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;

	}
	skd_send_special_fitmsg(skdev, skspcl);
}

static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}

static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}

static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		dev_err(&skdev->pdev->dev,
			"*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
			key, code, qual, fruc);
	}
}

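/*
 * Advance the internal-command state machine: TEST_UNIT_READY ->
 * WRITE_BUFFER -> READ_BUFFER -> READ_CAPACITY -> INQUIRY brings the
 * device online; failures retry or soft-reset as appropriate.
 */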
static void skd_complete_internal(struct skd_device *skdev,
				  struct fit_completion_entry_v1 *skcomp,
				  struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];

	lockdep_assert_held(&skdev->lock);

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);

	dma_sync_single_for_cpu(&skdev->pdev->dev,
				skspcl->db_dma_address,
				skspcl->req.sksg_list[0].byte_count,
				DMA_BIDIRECTIONAL);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"TUR failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** TUR failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"write buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** write buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				dev_err(&skdev->pdev->dev,
					"*** W/R Buffer mismatch %d ***\n",
					skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					dev_err(&skdev->pdev->dev,
						"W/R Buffer Connect Error\n");
					return;
				}
			}

		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"read buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** read buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];

			dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
				skdev->read_cap_last_lba,
				skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}

/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */

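/*
 * Hand a completed FIT message to the hardware: encode the message's DMA
 * address and size class into a single 64-bit doorbell write.
 */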
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
{
	u64 qcmd;

	dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n",
		&skmsg->mb_dma_address, skd_in_flight(skdev));
	dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;
		int i;
		for (i = 0; i < skmsg->length; i += 8) {
			dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}
	}

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;

	dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
				   skmsg->length, DMA_TO_DEVICE);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)
{
	u64 qcmd;

	WARN_ON_ONCE(skspcl->req.n_sg != 1);

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}

		dev_dbg(&skdev->pdev->dev,
			"skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n",
			skspcl, skspcl->req.id, skspcl->req.sksg_list,
			&skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
				   SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->req.sksg_dma_address,
				   1 * sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->db_dma_address,
				   skspcl->req.sksg_list[0].byte_count,
				   DMA_BIDIRECTIONAL);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */

static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr);

struct sns_info {
	u8 type;
	u8 stat;
	u8 key;
	u8 asc;
	u8 ascq;
	u8 mask;
	enum skd_check_status_action action;
};

static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{ 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	/* Smart alerts */
	{ 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B, 0, 0, 0x1C,		/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F,	/* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },
};

/*
 * Look up status and sense data to decide how to handle the error
 * from the device.
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, struct fit_comp_error_info *skerr)
{
	int i;

	dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
		skerr->key, skerr->code, skerr->qual, skerr->fruc);

	dev_dbg(&skdev->pdev->dev,
		"stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
		skerr->fruc);

	/* Does the info match an entry in the good category? */
	for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			dev_err(&skdev->pdev->dev,
				"SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
				skerr->key, skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		dev_dbg(&skdev->pdev->dev, "status check: error\n");
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	dev_dbg(&skdev->pdev->dev, "status check good default\n");
	return SKD_CHECK_STATUS_REPORT_GOOD;
}

static void skd_resolve_req_exception(struct skd_device *skdev,
				      struct skd_request_context *skreq,
				      struct request *req)
{
	u8 cmp_status = skreq->completion.status;

	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
	case SKD_CHECK_STATUS_REPORT_GOOD:
	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
		skreq->status = BLK_STS_OK;
		blk_mq_complete_request(req);
		break;

	case SKD_CHECK_STATUS_BUSY_IMMINENT:
		skd_log_skreq(skdev, skreq, "retry(busy)");
		blk_mq_requeue_request(req, true);
		dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
		skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
		skdev->timer_countdown = SKD_TIMER_MINUTES(20);
		skd_quiesce_dev(skdev);
		break;

	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
		if (++skreq->retries < SKD_MAX_RETRIES) {
			skd_log_skreq(skdev, skreq, "retry");
			blk_mq_requeue_request(req, true);
			break;
		}
		/* fall through */

	case SKD_CHECK_STATUS_REPORT_ERROR:
	default:
		skreq->status = BLK_STS_IOERR;
		blk_mq_complete_request(req);
		break;
	}
}

static void skd_release_skreq(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	/*
	 * Reclaim the skd_request_context
	 */
	skreq->state = SKD_REQ_STATE_IDLE;
}

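/*
 * Drain the device's completion ring: match each entry to its request by
 * tag, copy completion/error info, release DMA resources and finish or
 * retry the request. Returns 1 if it stopped because 'limit' entries
 * were processed.
 */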
static int skd_isr_completion_posted(struct skd_device *skdev,
				     int limit, int *enqueued)
{
	struct fit_completion_entry_v1 *skcmp;
	struct fit_comp_error_info *skerr;
	u16 req_id;
	u32 tag;
	u16 hwq = 0;
	struct request *rq;
	struct skd_request_context *skreq;
	u16 cmp_cntxt;
	u8 cmp_status;
	u8 cmp_cycle;
	u32 cmp_bytes;
	int rc = 0;
	int processed = 0;

	lockdep_assert_held(&skdev->lock);

	for (;;) {
		SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);

		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
		cmp_cycle = skcmp->cycle;
		cmp_cntxt = skcmp->tag;
		cmp_status = skcmp->status;
		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);

		skerr = &skdev->skerr_table[skdev->skcomp_ix];

		dev_dbg(&skdev->pdev->dev,
			"cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
			skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
			cmp_cntxt, cmp_status, skd_in_flight(skdev),
			cmp_bytes, skdev->proto_ver);

		if (cmp_cycle != skdev->skcomp_cycle) {
			dev_dbg(&skdev->pdev->dev, "end of completions\n");
			break;
		}
		/*
		 * Update the completion queue head index and possibly
		 * the completion cycle count. 8-bit wrap-around.
		 */
		skdev->skcomp_ix++;
		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
			skdev->skcomp_ix = 0;
			skdev->skcomp_cycle++;
		}

		/*
		 * The command context is a unique 32-bit ID. The low order
		 * bits help locate the request. The request is usually a
		 * r/w request (see skd_start() above) or a special request.
		 */
		req_id = cmp_cntxt;
		tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;

		/* Is this other than a r/w request? */
		if (tag >= skdev->num_req_context) {
			/*
			 * This is not a completion for a r/w request.
			 */
			WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
						      tag));
			skd_complete_other(skdev, skcmp, skerr);
			continue;
		}

		rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
		if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
			 tag))
			continue;
		skreq = blk_mq_rq_to_pdu(rq);

		/*
		 * Make sure the request ID for the slot matches.
		 */
		if (skreq->id != req_id) {
			dev_err(&skdev->pdev->dev,
				"Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
				req_id, skreq->id, cmp_cntxt);

			continue;
		}

		SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);

		skreq->completion = *skcmp;
		if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
			skreq->err_info = *skerr;
			skd_log_check_status(skdev, cmp_status, skerr->key,
					     skerr->code, skerr->qual,
					     skerr->fruc);
		}
		/* Release DMA resources for the request. */
		if (skreq->n_sg > 0)
			skd_postop_sg_list(skdev, skreq);

		skd_release_skreq(skdev, skreq);

		/*
		 * Capture the outcome and post it back to the native request.
		 */
		if (likely(cmp_status == SAM_STAT_GOOD)) {
			skreq->status = BLK_STS_OK;
			blk_mq_complete_request(rq);
		} else {
			skd_resolve_req_exception(skdev, skreq, rq);
		}

		/* skd_isr_comp_limit equal zero means no limit */
		if (limit) {
			if (++processed >= limit) {
				rc = 1;
				break;
			}
		}
	}

	if (skdev->state == SKD_DRVR_STATE_PAUSING &&
	    skd_in_flight(skdev) == 0) {
		skdev->state = SKD_DRVR_STATE_PAUSED;
		wake_up_interruptible(&skdev->waitq);
	}

	return rc;
}

static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr)
{
	u32 req_id = 0;
	u32 req_table;
	u32 req_slot;
	struct skd_special_context *skspcl;

	lockdep_assert_held(&skdev->lock);

	req_id = skcomp->tag;
	req_table = req_id & SKD_ID_TABLE_MASK;
	req_slot = req_id & SKD_ID_SLOT_MASK;

	dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
		req_id, req_slot);

	/*
	 * Based on the request id, determine how to dispatch this completion.
	 * This switch/case is finding the good cases and forwarding the
	 * completion entry. Errors are reported below the switch.
	 */
	switch (req_table) {
	case SKD_ID_RW_REQUEST:
		/*
		 * The caller, skd_isr_completion_posted() above,
		 * handles r/w requests. The only way we get here
		 * is if the req_slot is out of bounds.
		 */
		break;

	case SKD_ID_INTERNAL:
		if (req_slot == 0) {
			skspcl = &skdev->internal_skspcl;
			if (skspcl->req.id == req_id &&
			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
				skd_complete_internal(skdev,
						      skcomp, skerr, skspcl);
				return;
			}
		}
		break;

	case SKD_ID_FIT_MSG:
		/*
		 * These id's should never appear in a completion record.
		 */
		break;

	default:
		/*
		 * These id's should never appear anywhere.
		 */
		break;
	}

	/*
	 * If we get here it is a bad or stale id.
	 */
}

static void skd_reset_skcomp(struct skd_device *skdev)
{
	memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);

	skdev->skcomp_ix = 0;
	skdev->skcomp_cycle = 1;
}

/*
 *****************************************************************************
 * INTERRUPTS
 *****************************************************************************
 */
static void skd_completion_worker(struct work_struct *work)
{
	struct skd_device *skdev =
		container_of(work, struct skd_device, completion_worker);
	unsigned long flags;
	int flush_enqueued = 0;

	spin_lock_irqsave(&skdev->lock, flags);

	/*
	 * pass in limit=0, which means no limit..
	 * process everything in compq
	 */
	skd_isr_completion_posted(skdev, 0, &flush_enqueued);
	schedule_work(&skdev->start_queue);

	spin_unlock_irqrestore(&skdev->lock, flags);
}

static void skd_isr_msg_from_dev(struct skd_device *skdev);

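/*
 * Interrupt handler: ack and dispatch each pending interrupt source in a
 * loop, deferring bulk completion processing to the completion worker
 * where possible.
 */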
static irqreturn_t
skd_isr(int irq, void *ptr)
{
	struct skd_device *skdev = ptr;
	u32 intstat;
	u32 ack;
	int rc = 0;
	int deferred = 0;
	int flush_enqueued = 0;

	spin_lock(&skdev->lock);

	for (;;) {
		intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);

		ack = FIT_INT_DEF_MASK;
		ack &= intstat;

		dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
			ack);

		/* As long as there is an int pending on device, keep
		 * running loop. When none, get out, but if we've never
		 * done any processing, call completion handler?
		 */
		if (ack == 0) {
			/* No interrupts on device, but run the completion
			 * processor anyway?
			 */
			if (rc == 0)
				if (likely(skdev->state
						== SKD_DRVR_STATE_ONLINE))
					deferred = 1;
			break;
		}

		rc = IRQ_HANDLED;

		SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);

		if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
			   (skdev->state != SKD_DRVR_STATE_STOPPING))) {
			if (intstat & FIT_ISH_COMPLETION_POSTED) {
				/*
				 * If we have already deferred completion
				 * processing, don't bother running it again
				 */
				if (deferred == 0)
					deferred =
						skd_isr_completion_posted(skdev,
						skd_isr_comp_limit, &flush_enqueued);
			}

			if (intstat & FIT_ISH_FW_STATE_CHANGE) {
				skd_isr_fwstate(skdev);
				if (skdev->state == SKD_DRVR_STATE_FAULT ||
				    skdev->state ==
				    SKD_DRVR_STATE_DISAPPEARED) {
					spin_unlock(&skdev->lock);
					return rc;
				}
			}

			if (intstat & FIT_ISH_MSG_FROM_DEV)
				skd_isr_msg_from_dev(skdev);
		}
	}

	if (unlikely(flush_enqueued))
		schedule_work(&skdev->start_queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		schedule_work(&skdev->start_queue);

	spin_unlock(&skdev->lock);

	return rc;
}

static void skd_drive_fault(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_FAULT;
	dev_err(&skdev->pdev->dev, "Drive FAULT\n");
}

static void skd_drive_disappeared(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_DISAPPEARED;
	dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
}

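/*
 * React to a firmware state change reported by the device and move the
 * driver state machine accordingly.
 */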
1775static void skd_isr_fwstate(struct skd_device *skdev)
1776{
1777 u32 sense;
1778 u32 state;
1779 u32 mtd;
1780 int prev_driver_state = skdev->state;
1781
1782 sense = SKD_READL(skdev, FIT_STATUS);
1783 state = sense & FIT_SR_DRIVE_STATE_MASK;
1784
f98806d6
BVA
1785 dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
1786 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
1787 skd_drive_state_to_str(state), state);
e67f86b3
AB
1788
1789 skdev->drive_state = state;
1790
1791 switch (skdev->drive_state) {
1792 case FIT_SR_DRIVE_INIT:
1793 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
1794 skd_disable_interrupts(skdev);
1795 break;
1796 }
1797 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
79ce12a8 1798 skd_recover_requests(skdev);
e67f86b3
AB
1799 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
1800 skdev->timer_countdown = SKD_STARTING_TIMO;
1801 skdev->state = SKD_DRVR_STATE_STARTING;
1802 skd_soft_reset(skdev);
1803 break;
1804 }
1805 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
1806 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1807 skdev->last_mtd = mtd;
1808 break;
1809
1810 case FIT_SR_DRIVE_ONLINE:
1811 skdev->cur_max_queue_depth = skd_max_queue_depth;
1812 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
1813 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
1814
1815 skdev->queue_low_water_mark =
1816 skdev->cur_max_queue_depth * 2 / 3 + 1;
1817 if (skdev->queue_low_water_mark < 1)
1818 skdev->queue_low_water_mark = 1;
1819 dev_info(&skdev->pdev->dev,
1820 "Queue depth limit=%d dev=%d lowat=%d\n",
1821 skdev->cur_max_queue_depth,
1822 skdev->dev_max_queue_depth,
1823 skdev->queue_low_water_mark);
1824
1825 skd_refresh_device_data(skdev);
1826 break;
1827
1828 case FIT_SR_DRIVE_BUSY:
1829 skdev->state = SKD_DRVR_STATE_BUSY;
1830 skdev->timer_countdown = SKD_BUSY_TIMO;
1831 skd_quiesce_dev(skdev);
1832 break;
1833 case FIT_SR_DRIVE_BUSY_SANITIZE:
1834 /* Set a timer for 3 seconds; we'll abort any unfinished
1835 * commands after it expires
1836 */
1837 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
1838 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
1839 schedule_work(&skdev->start_queue);
1840 break;
1841 case FIT_SR_DRIVE_BUSY_ERASE:
1842 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
1843 skdev->timer_countdown = SKD_BUSY_TIMO;
1844 break;
1845 case FIT_SR_DRIVE_OFFLINE:
1846 skdev->state = SKD_DRVR_STATE_IDLE;
1847 break;
1848 case FIT_SR_DRIVE_SOFT_RESET:
1849 switch (skdev->state) {
1850 case SKD_DRVR_STATE_STARTING:
1851 case SKD_DRVR_STATE_RESTARTING:
1852 /* Expected by a caller of skd_soft_reset() */
1853 break;
1854 default:
1855 skdev->state = SKD_DRVR_STATE_RESTARTING;
1856 break;
1857 }
1858 break;
1859 case FIT_SR_DRIVE_FW_BOOTING:
1860 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
1861 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
1862 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
1863 break;
1864
1865 case FIT_SR_DRIVE_DEGRADED:
1866 case FIT_SR_PCIE_LINK_DOWN:
1867 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
1868 break;
1869
1870 case FIT_SR_DRIVE_FAULT:
1871 skd_drive_fault(skdev);
1872 skd_recover_requests(skdev);
1873 schedule_work(&skdev->start_queue);
1874 break;
1875
1876 /* PCIe bus returned all Fs? */
1877 case 0xFF:
1878 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
1879 sense);
1880 skd_drive_disappeared(skdev);
1881 skd_recover_requests(skdev);
1882 schedule_work(&skdev->start_queue);
1883 break;
1884 default:
1885 /*
1886 * Unknown FW state. Wait for a state we recognize.
1887 */
1888 break;
1889 }
1890 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
1891 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
1892 skd_skdev_state_to_str(skdev->state), skdev->state);
1893}
1894
1895static bool skd_recover_request(struct request *req, void *data, bool reserved)
1896{
1897 struct skd_device *const skdev = data;
1898 struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
1899
1900 if (skreq->state != SKD_REQ_STATE_BUSY)
1901 return true;
1902
1903 skd_log_skreq(skdev, skreq, "recover");
1904
1905 /* Release DMA resources for the request. */
1906 if (skreq->n_sg > 0)
1907 skd_postop_sg_list(skdev, skreq);
1908
1909 skreq->state = SKD_REQ_STATE_IDLE;
1910 skreq->status = BLK_STS_IOERR;
1911 blk_mq_complete_request(req);
1912 return true;
1913}
1914
1915static void skd_recover_requests(struct skd_device *skdev)
1916{
1917 blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
1918}
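/*
 * blk_mq_tagset_busy_iter() calls the callback once for every request the
 * block layer has started, which is what lets skd_recover_request() above
 * fail them all in one pass. A sketch of the same pattern used to count
 * in-flight requests (hypothetical helper, not part of this driver):
 *
 *	static bool count_busy(struct request *req, void *data, bool reserved)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;	// keep iterating
 *	}
 *
 *	unsigned int busy = 0;
 *	blk_mq_tagset_busy_iter(&skdev->tag_set, count_busy, &busy);
 */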
1919
1920static void skd_isr_msg_from_dev(struct skd_device *skdev)
1921{
1922 u32 mfd;
1923 u32 mtd;
1924 u32 data;
1925
1926 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
1927
1928 dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
1929 skdev->last_mtd);
1930
1931 /* ignore any mtd that is an ack for something we didn't send */
1932 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
1933 return;
1934
1935 switch (FIT_MXD_TYPE(mfd)) {
1936 case FIT_MTD_FITFW_INIT:
1937 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
1938
1939 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
1940 dev_err(&skdev->pdev->dev, "protocol mismatch\n");
1941 dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
1942 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
1943 dev_err(&skdev->pdev->dev, " please upgrade driver\n");
1944 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
1945 skd_soft_reset(skdev);
1946 break;
1947 }
1948 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
1949 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1950 skdev->last_mtd = mtd;
1951 break;
1952
1953 case FIT_MTD_GET_CMDQ_DEPTH:
1954 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
1955 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
1956 SKD_N_COMPLETION_ENTRY);
1957 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1958 skdev->last_mtd = mtd;
1959 break;
1960
1961 case FIT_MTD_SET_COMPQ_DEPTH:
1962 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
1963 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
1964 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1965 skdev->last_mtd = mtd;
1966 break;
1967
1968 case FIT_MTD_SET_COMPQ_ADDR:
1969 skd_reset_skcomp(skdev);
1970 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
1971 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1972 skdev->last_mtd = mtd;
1973 break;
1974
1975 case FIT_MTD_CMD_LOG_HOST_ID:
1976 /* hardware interface overflows in y2106 */
1977 skdev->connect_time_stamp = (u32)ktime_get_real_seconds();
1978 data = skdev->connect_time_stamp & 0xFFFF;
1979 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
1980 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1981 skdev->last_mtd = mtd;
1982 break;
1983
1984 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
1985 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
1986 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
1987 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
1988 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1989 skdev->last_mtd = mtd;
1990 break;
1991
1992 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
1993 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
1994 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
1995 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1996 skdev->last_mtd = mtd;
1997
1998 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
1999 skdev->connect_time_stamp, skdev->drive_jiffies);
2000 break;
2001
2002 case FIT_MTD_ARM_QUEUE:
2003 skdev->last_mtd = 0;
2004 /*
2005 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
2006 */
2007 break;
2008
2009 default:
2010 break;
2011 }
2012}
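/*
 * The FIT_MTD_CMD_LOG_TIME_STAMP_{LO,HI} steps above push a 32-bit
 * timestamp through a message field that carries only 16 bits of data per
 * exchange. A worked example of the split and of the reassembly performed
 * for skdev->drive_jiffies:
 *
 *	u32 stamp = 0x5B8D80F3;
 *	u16 lo = stamp & 0xFFFF;		// 0x80F3, sent with ..._LO
 *	u16 hi = (stamp >> 16) & 0xFFFF;	// 0x5B8D, sent with ..._HI
 *	u32 joined = lo | (hi << 16);		// 0x5B8D80F3 again
 */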
2013
2014static void skd_disable_interrupts(struct skd_device *skdev)
2015{
2016 u32 sense;
2017
2018 sense = SKD_READL(skdev, FIT_CONTROL);
2019 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
2020 SKD_WRITEL(skdev, sense, FIT_CONTROL);
2021 dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
2022
2023 /* Note that all 1s are written. A 1-bit means
2024 * disable, a 0-bit means enable.
2025 */
2026 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
2027}
2028
2029static void skd_enable_interrupts(struct skd_device *skdev)
2030{
2031 u32 val;
2032
2033 /* unmask interrupts first */
2034 val = FIT_ISH_FW_STATE_CHANGE +
2035 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
2036
2037 /* Note that the complement of the mask is written. A 1-bit
2038 * means disable, a 0-bit means enable. */
2039 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
2040 dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
2041
2042 val = SKD_READL(skdev, FIT_CONTROL);
2043 val |= FIT_CR_ENABLE_INTERRUPTS;
2044 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
2045 SKD_WRITEL(skdev, val, FIT_CONTROL);
2046}
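/*
 * FIT_INT_MASK_HOST has inverted polarity: a 1-bit masks a source, a
 * 0-bit unmasks it. That is why skd_disable_interrupts() writes ~0 and
 * skd_enable_interrupts() writes the complement of the wanted bits. A
 * small illustration (MASK_REG is an illustrative name):
 *
 *	u32 wanted = FIT_ISH_FW_STATE_CHANGE | FIT_ISH_COMPLETION_POSTED |
 *		     FIT_ISH_MSG_FROM_DEV;
 *
 *	writel(~0, base + MASK_REG);		// everything masked
 *	writel(~wanted, base + MASK_REG);	// only "wanted" unmasked
 */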
2047
2048/*
2049 *****************************************************************************
2050 * START, STOP, RESTART, QUIESCE, UNQUIESCE
2051 *****************************************************************************
2052 */
2053
2054static void skd_soft_reset(struct skd_device *skdev)
2055{
2056 u32 val;
2057
2058 val = SKD_READL(skdev, FIT_CONTROL);
2059 val |= (FIT_CR_SOFT_RESET);
2060 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
2061 SKD_WRITEL(skdev, val, FIT_CONTROL);
2062}
2063
2064static void skd_start_device(struct skd_device *skdev)
2065{
2066 unsigned long flags;
2067 u32 sense;
2068 u32 state;
2069
2070 spin_lock_irqsave(&skdev->lock, flags);
2071
2072 /* ack all ghost interrupts */
2073 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2074
2075 sense = SKD_READL(skdev, FIT_STATUS);
2076
2077 dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
2078
2079 state = sense & FIT_SR_DRIVE_STATE_MASK;
2080 skdev->drive_state = state;
2081 skdev->last_mtd = 0;
2082
2083 skdev->state = SKD_DRVR_STATE_STARTING;
2084 skdev->timer_countdown = SKD_STARTING_TIMO;
2085
2086 skd_enable_interrupts(skdev);
2087
2088 switch (skdev->drive_state) {
2089 case FIT_SR_DRIVE_OFFLINE:
2090 dev_err(&skdev->pdev->dev, "Drive offline...\n");
2091 break;
2092
2093 case FIT_SR_DRIVE_FW_BOOTING:
2094 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
2095 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2096 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
2097 break;
2098
2099 case FIT_SR_DRIVE_BUSY_SANITIZE:
2100 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
2101 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2102 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2103 break;
2104
2105 case FIT_SR_DRIVE_BUSY_ERASE:
2106 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
2107 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2108 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2109 break;
2110
2111 case FIT_SR_DRIVE_INIT:
2112 case FIT_SR_DRIVE_ONLINE:
2113 skd_soft_reset(skdev);
2114 break;
2115
2116 case FIT_SR_DRIVE_BUSY:
2117 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
2118 skdev->state = SKD_DRVR_STATE_BUSY;
2119 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2120 break;
2121
2122 case FIT_SR_DRIVE_SOFT_RESET:
2123 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
2124 break;
2125
2126 case FIT_SR_DRIVE_FAULT:
2127 /* The fault state is bad: a soft reset won't clear it,
2128 * and a hard reset may not work on this device.
2129 * For now, just fault so the system doesn't hang.
2130 */
2131 skd_drive_fault(skdev);
2132 /* start the queue so we can respond to requests with an error */
2133 dev_dbg(&skdev->pdev->dev, "starting queue\n");
2134 schedule_work(&skdev->start_queue);
2135 skdev->gendisk_on = -1;
2136 wake_up_interruptible(&skdev->waitq);
2137 break;
2138
2139 case 0xFF:
2140 /* Most likely the device isn't there or isn't responding
2141 * to the BAR1 addresses. */
2142 skd_drive_disappeared(skdev);
2143 /* start the queue so we can respond to requests with an error */
2144 dev_dbg(&skdev->pdev->dev,
2145 "starting queue to error-out reqs\n");
2146 schedule_work(&skdev->start_queue);
2147 skdev->gendisk_on = -1;
2148 wake_up_interruptible(&skdev->waitq);
2149 break;
2150
2151 default:
2152 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
2153 skdev->drive_state);
2154 break;
2155 }
2156
2157 state = SKD_READL(skdev, FIT_CONTROL);
2158 dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
2159
2160 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2161 dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
2162
2163 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
2164 dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
2165
2166 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2167 dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
2168
2169 state = SKD_READL(skdev, FIT_HW_VERSION);
2170 dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
2171
2172 spin_unlock_irqrestore(&skdev->lock, flags);
2173}
2174
2175static void skd_stop_device(struct skd_device *skdev)
2176{
2177 unsigned long flags;
2178 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2179 u32 dev_state;
2180 int i;
2181
2182 spin_lock_irqsave(&skdev->lock, flags);
2183
2184 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
2185 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
2186 goto stop_out;
2187 }
2188
2189 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
2190 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
2191 goto stop_out;
2192 }
2193
2194 skdev->state = SKD_DRVR_STATE_SYNCING;
2195 skdev->sync_done = 0;
2196
2197 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
2198
2199 spin_unlock_irqrestore(&skdev->lock, flags);
2200
2201 wait_event_interruptible_timeout(skdev->waitq,
2202 (skdev->sync_done), (10 * HZ));
2203
2204 spin_lock_irqsave(&skdev->lock, flags);
2205
2206 switch (skdev->sync_done) {
2207 case 0:
2208 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
2209 break;
2210 case 1:
2211 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
2212 break;
2213 default:
2214 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
2215 }
2216
2217stop_out:
2218 skdev->state = SKD_DRVR_STATE_STOPPING;
2219 spin_unlock_irqrestore(&skdev->lock, flags);
2220
2221 skd_kill_timer(skdev);
2222
2223 spin_lock_irqsave(&skdev->lock, flags);
2224 skd_disable_interrupts(skdev);
2225
2226 /* ensure all ints on device are cleared */
2227 /* soft reset the device to unload with a clean slate */
2228 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2229 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
2230
2231 spin_unlock_irqrestore(&skdev->lock, flags);
2232
2233 /* poll every 100ms, 1 second timeout */
2234 for (i = 0; i < 10; i++) {
2235 dev_state =
2236 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
2237 if (dev_state == FIT_SR_DRIVE_INIT)
2238 break;
2239 set_current_state(TASK_INTERRUPTIBLE);
2240 schedule_timeout(msecs_to_jiffies(100));
2241 }
2242
2243 if (dev_state != FIT_SR_DRIVE_INIT)
2244 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
2245 dev_state);
2246}
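/*
 * The tail of skd_stop_device() polls FIT_STATUS every 100 ms for up to
 * one second. The same poll-until-state-or-timeout idiom in isolation,
 * as a minimal sketch (STATE_MASK and the helper are hypothetical):
 *
 *	static int wait_for_state(void __iomem *reg, u32 want, int tries)
 *	{
 *		while (tries-- > 0) {
 *			if ((readl(reg) & STATE_MASK) == want)
 *				return 0;
 *			msleep(100);
 *		}
 *		return -ETIMEDOUT;
 *	}
 */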
2247
2248/* assume spinlock is held */
2249static void skd_restart_device(struct skd_device *skdev)
2250{
2251 u32 state;
2252
2253 /* ack all ghost interrupts */
2254 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2255
2256 state = SKD_READL(skdev, FIT_STATUS);
2257
2258 dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
2259
2260 state &= FIT_SR_DRIVE_STATE_MASK;
2261 skdev->drive_state = state;
2262 skdev->last_mtd = 0;
2263
2264 skdev->state = SKD_DRVR_STATE_RESTARTING;
2265 skdev->timer_countdown = SKD_RESTARTING_TIMO;
2266
2267 skd_soft_reset(skdev);
2268}
2269
2270/* assume spinlock is held */
2271static int skd_quiesce_dev(struct skd_device *skdev)
2272{
2273 int rc = 0;
2274
2275 switch (skdev->state) {
2276 case SKD_DRVR_STATE_BUSY:
2277 case SKD_DRVR_STATE_BUSY_IMMINENT:
2278 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
2279 blk_mq_stop_hw_queues(skdev->queue);
2280 break;
2281 case SKD_DRVR_STATE_ONLINE:
2282 case SKD_DRVR_STATE_STOPPING:
2283 case SKD_DRVR_STATE_SYNCING:
2284 case SKD_DRVR_STATE_PAUSING:
2285 case SKD_DRVR_STATE_PAUSED:
2286 case SKD_DRVR_STATE_STARTING:
2287 case SKD_DRVR_STATE_RESTARTING:
2288 case SKD_DRVR_STATE_RESUMING:
2289 default:
2290 rc = -EINVAL;
2291 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
2292 skdev->state);
2293 }
2294 return rc;
2295}
2296
2297/* assume spinlock is held */
2298static int skd_unquiesce_dev(struct skd_device *skdev)
2299{
2300 int prev_driver_state = skdev->state;
2301
2302 skd_log_skdev(skdev, "unquiesce");
2303 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
2304 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
2305 return 0;
2306 }
2307 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
2308 /*
2309 * If there has been a state change to other than
2310 * ONLINE, we will rely on the controller state change
2311 * to come back online and restart the queue.
2312 * The BUSY state means that the driver is ready to
2313 * continue normal processing but is waiting for the
2314 * controller to become available.
2315 */
2316 skdev->state = SKD_DRVR_STATE_BUSY;
2317 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
2318 return 0;
2319 }
2320
2321 /*
2322 * The drive has just come online; the driver is either in startup,
2323 * paused performing a task, or busy waiting for hardware.
2324 */
2325 switch (skdev->state) {
2326 case SKD_DRVR_STATE_PAUSED:
2327 case SKD_DRVR_STATE_BUSY:
2328 case SKD_DRVR_STATE_BUSY_IMMINENT:
2329 case SKD_DRVR_STATE_BUSY_ERASE:
2330 case SKD_DRVR_STATE_STARTING:
2331 case SKD_DRVR_STATE_RESTARTING:
2332 case SKD_DRVR_STATE_FAULT:
2333 case SKD_DRVR_STATE_IDLE:
2334 case SKD_DRVR_STATE_LOAD:
2335 skdev->state = SKD_DRVR_STATE_ONLINE;
2336 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
2337 skd_skdev_state_to_str(prev_driver_state),
2338 prev_driver_state, skd_skdev_state_to_str(skdev->state),
2339 skdev->state);
2340 dev_dbg(&skdev->pdev->dev,
2341 "**** device ONLINE...starting block queue\n");
2342 dev_dbg(&skdev->pdev->dev, "starting queue\n");
2343 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
2344 schedule_work(&skdev->start_queue);
2345 skdev->gendisk_on = 1;
2346 wake_up_interruptible(&skdev->waitq);
2347 break;
2348
2349 case SKD_DRVR_STATE_DISAPPEARED:
2350 default:
2351 dev_dbg(&skdev->pdev->dev,
2352 "**** driver state %d, not implemented\n",
2353 skdev->state);
2354 return -EBUSY;
2355 }
2356 return 0;
2357}
2358
2359/*
2360 *****************************************************************************
2361 * PCIe MSI/MSI-X INTERRUPT HANDLERS
2362 *****************************************************************************
2363 */
2364
2365static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
2366{
2367 struct skd_device *skdev = skd_host_data;
2368 unsigned long flags;
2369
2370 spin_lock_irqsave(&skdev->lock, flags);
2371 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2372 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2373 dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
2374 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2375 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
2376 spin_unlock_irqrestore(&skdev->lock, flags);
2377 return IRQ_HANDLED;
2378}
2379
2380static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
2381{
2382 struct skd_device *skdev = skd_host_data;
2383 unsigned long flags;
2384
2385 spin_lock_irqsave(&skdev->lock, flags);
2386 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2387 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2388 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
2389 skd_isr_fwstate(skdev);
2390 spin_unlock_irqrestore(&skdev->lock, flags);
2391 return IRQ_HANDLED;
2392}
2393
2394static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
2395{
2396 struct skd_device *skdev = skd_host_data;
2397 unsigned long flags;
2398 int flush_enqueued = 0;
2399 int deferred;
2400
2401 spin_lock_irqsave(&skdev->lock, flags);
2402 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2403 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2404 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
2405 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
2406 &flush_enqueued);
2407 if (flush_enqueued)
2408 schedule_work(&skdev->start_queue);
2409
2410 if (deferred)
2411 schedule_work(&skdev->completion_worker);
2412 else if (!flush_enqueued)
2413 schedule_work(&skdev->start_queue);
2414
2415 spin_unlock_irqrestore(&skdev->lock, flags);
2416
2417 return IRQ_HANDLED;
2418}
2419
2420static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
2421{
2422 struct skd_device *skdev = skd_host_data;
2423 unsigned long flags;
2424
2425 spin_lock_irqsave(&skdev->lock, flags);
2426 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2427 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2428 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
2429 skd_isr_msg_from_dev(skdev);
2430 spin_unlock_irqrestore(&skdev->lock, flags);
2431 return IRQ_HANDLED;
2432}
2433
2434static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
2435{
2436 struct skd_device *skdev = skd_host_data;
2437 unsigned long flags;
2438
2439 spin_lock_irqsave(&skdev->lock, flags);
2440 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2441 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2442 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
2443 spin_unlock_irqrestore(&skdev->lock, flags);
2444 return IRQ_HANDLED;
2445}
2446
2447/*
2448 *****************************************************************************
2449 * PCIe MSI/MSI-X SETUP
2450 *****************************************************************************
2451 */
2452
2453struct skd_msix_entry {
2454 char isr_name[30];
2455};
2456
2457struct skd_init_msix_entry {
2458 const char *name;
2459 irq_handler_t handler;
2460};
2461
2462#define SKD_MAX_MSIX_COUNT 13
2463#define SKD_MIN_MSIX_COUNT 7
2464#define SKD_BASE_MSIX_IRQ 4
2465
2466static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
2467 { "(DMA 0)", skd_reserved_isr },
2468 { "(DMA 1)", skd_reserved_isr },
2469 { "(DMA 2)", skd_reserved_isr },
2470 { "(DMA 3)", skd_reserved_isr },
2471 { "(State Change)", skd_statec_isr },
2472 { "(COMPL_Q)", skd_comp_q },
2473 { "(MSG)", skd_msg_isr },
2474 { "(Reserved)", skd_reserved_isr },
2475 { "(Reserved)", skd_reserved_isr },
2476 { "(Queue Full 0)", skd_qfull_isr },
2477 { "(Queue Full 1)", skd_qfull_isr },
2478 { "(Queue Full 2)", skd_qfull_isr },
2479 { "(Queue Full 3)", skd_qfull_isr },
2480};
2481
2482static int skd_acquire_msix(struct skd_device *skdev)
2483{
2484 int i, rc;
2485 struct pci_dev *pdev = skdev->pdev;
2486
2487 rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
2488 PCI_IRQ_MSIX);
2489 if (rc < 0) {
2490 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
2491 goto out;
2492 }
2493
2494 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
2495 sizeof(struct skd_msix_entry), GFP_KERNEL);
2496 if (!skdev->msix_entries) {
2497 rc = -ENOMEM;
f98806d6 2498 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
2499 goto out;
2500 }
2501
2502 /* Enable MSI-X vectors for the base queue */
2503 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
2504 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
2505
2506 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
2507 "%s%d-msix %s", DRV_NAME, skdev->devno,
2508 msix_entries[i].name);
2509
2510 rc = devm_request_irq(&skdev->pdev->dev,
2511 pci_irq_vector(skdev->pdev, i),
2512 msix_entries[i].handler, 0,
2513 qentry->isr_name, skdev);
2514 if (rc) {
2515 dev_err(&skdev->pdev->dev,
2516 "Unable to register(%d) MSI-X handler %d: %s\n",
2517 rc, i, qentry->isr_name);
2518 goto msix_out;
2519 }
2520 }
2521
2522 dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
2523 SKD_MAX_MSIX_COUNT);
2524 return 0;
2525
2526msix_out:
2527 while (--i >= 0)
2528 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
2529out:
2530 kfree(skdev->msix_entries);
2531 skdev->msix_entries = NULL;
2532 return rc;
2533}
2534
2535static int skd_acquire_irq(struct skd_device *skdev)
2536{
2537 struct pci_dev *pdev = skdev->pdev;
2538 unsigned int irq_flag = PCI_IRQ_LEGACY;
2539 int rc;
2540
2541 if (skd_isr_type == SKD_IRQ_MSIX) {
2542 rc = skd_acquire_msix(skdev);
2543 if (!rc)
2544 return 0;
2545
2546 dev_err(&skdev->pdev->dev,
2547 "failed to enable MSI-X, re-trying with MSI %d\n", rc);
2548 }
2549
2550 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
2551 skdev->devno);
2552
2553 if (skd_isr_type != SKD_IRQ_LEGACY)
2554 irq_flag |= PCI_IRQ_MSI;
2555 rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
2556 if (rc < 0) {
2557 dev_err(&skdev->pdev->dev,
2558 "failed to allocate the MSI interrupt %d\n", rc);
2559 return rc;
2560 }
2561
2562 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
2563 pdev->msi_enabled ? 0 : IRQF_SHARED,
2564 skdev->isr_name, skdev);
2565 if (rc) {
2566 pci_free_irq_vectors(pdev);
2567 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
2568 rc);
2569 return rc;
2570 }
2571
2572 return 0;
2573}
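/*
 * skd_acquire_irq() implements the usual interrupt fallback ladder:
 * MSI-X first, then MSI, then legacy INTx. Condensed, the non-MSI-X leg
 * relies on pci_alloc_irq_vectors() picking the best remaining type:
 *
 *	unsigned int flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
 *	int rc = pci_alloc_irq_vectors(pdev, 1, 1, flags);
 *
 *	if (rc < 0)
 *		return rc;	// no vector of any kind available
 *	// pdev->irq is now valid for devm_request_irq()
 */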
2574
2575static void skd_release_irq(struct skd_device *skdev)
2576{
2577 struct pci_dev *pdev = skdev->pdev;
2578
2579 if (skdev->msix_entries) {
2580 int i;
2581
2582 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
2583 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
2584 skdev);
2585 }
2586
2587 kfree(skdev->msix_entries);
2588 skdev->msix_entries = NULL;
2589 } else {
2590 devm_free_irq(&pdev->dev, pdev->irq, skdev);
2591 }
2592
2593 pci_free_irq_vectors(pdev);
2594}
2595
2596/*
2597 *****************************************************************************
2598 * CONSTRUCT
2599 *****************************************************************************
2600 */
2601
2602static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
2603 dma_addr_t *dma_handle, gfp_t gfp,
2604 enum dma_data_direction dir)
2605{
2606 struct device *dev = &skdev->pdev->dev;
2607 void *buf;
2608
2609 buf = kmem_cache_alloc(s, gfp);
2610 if (!buf)
2611 return NULL;
2612 *dma_handle = dma_map_single(dev, buf,
2613 kmem_cache_size(s), dir);
2614 if (dma_mapping_error(dev, *dma_handle)) {
2615 kmem_cache_free(s, buf);
2616 buf = NULL;
2617 }
2618 return buf;
2619}
2620
2621static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
2622 void *vaddr, dma_addr_t dma_handle,
2623 enum dma_data_direction dir)
2624{
2625 if (!vaddr)
2626 return;
2627
2628 dma_unmap_single(&skdev->pdev->dev, dma_handle,
2629 kmem_cache_size(s), dir);
2630 kmem_cache_free(s, vaddr);
2631}
2632
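/*
 * skd_alloc_dma() and skd_free_dma() pair a kmem_cache allocation with a
 * streaming DMA mapping of the whole cache object. A usage sketch (error
 * handling elided; the gfp flags at the real call sites differ):
 *
 *	dma_addr_t dma;
 *	void *buf = skd_alloc_dma(skdev, skdev->databuf_cache, &dma,
 *				  GFP_KERNEL, DMA_BIDIRECTIONAL);
 *
 *	// hand "dma" to the device, access "buf" from the CPU
 *	skd_free_dma(skdev, skdev->databuf_cache, buf, dma,
 *		     DMA_BIDIRECTIONAL);
 */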
2633static int skd_cons_skcomp(struct skd_device *skdev)
2634{
2635 int rc = 0;
2636 struct fit_completion_entry_v1 *skcomp;
2637
2638 dev_dbg(&skdev->pdev->dev,
2639 "comp pci_alloc, total bytes %zd entries %d\n",
2640 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
2641
2642 skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
2643 &skdev->cq_dma_address, GFP_KERNEL);
2644
2645 if (skcomp == NULL) {
2646 rc = -ENOMEM;
2647 goto err_out;
2648 }
2649
2650 skdev->skcomp_table = skcomp;
2651 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
2652 sizeof(*skcomp) *
2653 SKD_N_COMPLETION_ENTRY);
2654
2655err_out:
2656 return rc;
2657}
2658
2659static int skd_cons_skmsg(struct skd_device *skdev)
2660{
2661 int rc = 0;
2662 u32 i;
2663
2664 dev_dbg(&skdev->pdev->dev,
2665 "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
2666 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
2667 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
2668
2669 skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
2670 sizeof(struct skd_fitmsg_context),
2671 GFP_KERNEL);
2672 if (skdev->skmsg_table == NULL) {
2673 rc = -ENOMEM;
2674 goto err_out;
2675 }
2676
2677 for (i = 0; i < skdev->num_fitmsg_context; i++) {
2678 struct skd_fitmsg_context *skmsg;
2679
2680 skmsg = &skdev->skmsg_table[i];
2681
2682 skmsg->id = i + SKD_ID_FIT_MSG;
2683
2684 skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
2685 SKD_N_FITMSG_BYTES,
2686 &skmsg->mb_dma_address,
2687 GFP_KERNEL);
2688 if (skmsg->msg_buf == NULL) {
2689 rc = -ENOMEM;
2690 goto err_out;
2691 }
2692
2693 WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
2694 (FIT_QCMD_ALIGN - 1),
2695 "not aligned: msg_buf %p mb_dma_address %pad\n",
2696 skmsg->msg_buf, &skmsg->mb_dma_address);
2697 }
2698
2699err_out:
2700 return rc;
2701}
2702
2703static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
2704 u32 n_sg,
2705 dma_addr_t *ret_dma_addr)
2706{
2707 struct fit_sg_descriptor *sg_list;
2708
2709 sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
2710 GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
2711
2712 if (sg_list != NULL) {
2713 uint64_t dma_address = *ret_dma_addr;
2714 u32 i;
2715
2716 for (i = 0; i < n_sg - 1; i++) {
2717 uint64_t ndp_off;
2718 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
2719
2720 sg_list[i].next_desc_ptr = dma_address + ndp_off;
2721 }
2722 sg_list[i].next_desc_ptr = 0LL;
2723 }
2724
2725 return sg_list;
2726}
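/*
 * skd_cons_sg_list() pre-links the descriptors so that entry i holds the
 * bus address of entry i+1 and the last link is zeroed. For n_sg = 3 with
 * the array starting at bus address A, the chain it builds is:
 *
 *	sg_list[0].next_desc_ptr = A + 1 * sizeof(struct fit_sg_descriptor);
 *	sg_list[1].next_desc_ptr = A + 2 * sizeof(struct fit_sg_descriptor);
 *	sg_list[2].next_desc_ptr = 0;	// end of chain
 */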
2727
2728static void skd_free_sg_list(struct skd_device *skdev,
2729 struct fit_sg_descriptor *sg_list,
2730 dma_addr_t dma_addr)
2731{
2732 if (WARN_ON_ONCE(!sg_list))
2733 return;
2734
2735 skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
2736 DMA_TO_DEVICE);
2737}
2738
2739static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
2740 unsigned int hctx_idx, unsigned int numa_node)
2741{
2742 struct skd_device *skdev = set->driver_data;
2743 struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
2744
2745 skreq->state = SKD_REQ_STATE_IDLE;
2746 skreq->sg = (void *)(skreq + 1);
2747 sg_init_table(skreq->sg, skd_sgs_per_request);
2748 skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
2749 &skreq->sksg_dma_address);
2750
2751 return skreq->sksg_list ? 0 : -ENOMEM;
2752}
2753
2754static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
2755 unsigned int hctx_idx)
2756{
2757 struct skd_device *skdev = set->driver_data;
2758 struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
2759
2760 skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
2761}
2762
2763static int skd_cons_sksb(struct skd_device *skdev)
2764{
2765 int rc = 0;
2766 struct skd_special_context *skspcl;
2767
2768 skspcl = &skdev->internal_skspcl;
2769
2770 skspcl->req.id = 0 + SKD_ID_INTERNAL;
2771 skspcl->req.state = SKD_REQ_STATE_IDLE;
2772
2773 skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
2774 &skspcl->db_dma_address,
2775 GFP_DMA | __GFP_ZERO,
2776 DMA_BIDIRECTIONAL);
2777 if (skspcl->data_buf == NULL) {
2778 rc = -ENOMEM;
2779 goto err_out;
2780 }
2781
2782 skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
2783 &skspcl->mb_dma_address,
2784 GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
2785 if (skspcl->msg_buf == NULL) {
2786 rc = -ENOMEM;
2787 goto err_out;
2788 }
2789
2790 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
2791 &skspcl->req.sksg_dma_address);
2792 if (skspcl->req.sksg_list == NULL) {
2793 rc = -ENOMEM;
2794 goto err_out;
2795 }
2796
2797 if (!skd_format_internal_skspcl(skdev)) {
2798 rc = -EINVAL;
2799 goto err_out;
2800 }
2801
2802err_out:
2803 return rc;
2804}
2805
ca33dd92
BVA
2806static const struct blk_mq_ops skd_mq_ops = {
2807 .queue_rq = skd_mq_queue_rq,
2808 .complete = skd_complete_rq,
2809 .timeout = skd_timed_out,
2810 .init_request = skd_init_request,
2811 .exit_request = skd_exit_request,
2812};
2813
2814static int skd_cons_disk(struct skd_device *skdev)
2815{
2816 int rc = 0;
2817 struct gendisk *disk;
2818 struct request_queue *q;
2819 unsigned long flags;
2820
2821 disk = alloc_disk(SKD_MINORS_PER_DEVICE);
2822 if (!disk) {
2823 rc = -ENOMEM;
2824 goto err_out;
2825 }
2826
2827 skdev->disk = disk;
2828 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
2829
2830 disk->major = skdev->major;
2831 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
2832 disk->fops = &skd_blockdev_ops;
2833 disk->private_data = skdev;
2834
2835 memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
2836 skdev->tag_set.ops = &skd_mq_ops;
2837 skdev->tag_set.nr_hw_queues = 1;
2838 skdev->tag_set.queue_depth = skd_max_queue_depth;
2839 skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
2840 skdev->sgs_per_request * sizeof(struct scatterlist);
2841 skdev->tag_set.numa_node = NUMA_NO_NODE;
2842 skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
2843 BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
2844 skdev->tag_set.driver_data = skdev;
2845 rc = blk_mq_alloc_tag_set(&skdev->tag_set);
2846 if (rc)
2847 goto err_out;
2848 q = blk_mq_init_queue(&skdev->tag_set);
2849 if (IS_ERR(q)) {
2850 blk_mq_free_tag_set(&skdev->tag_set);
2851 rc = PTR_ERR(q);
2852 goto err_out;
2853 }
2854 q->queuedata = skdev;
2855
2856 skdev->queue = q;
2857 disk->queue = q;
2858
2859 blk_queue_write_cache(q, true, true);
2860 blk_queue_max_segments(q, skdev->sgs_per_request);
2861 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
2862
2863 /* set optimal I/O size to 8KB */
2864 blk_queue_io_opt(q, 8192);
2865
2866 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2867 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2868
2869 blk_queue_rq_timeout(q, 8 * HZ);
2870
2871 spin_lock_irqsave(&skdev->lock, flags);
2872 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
2873 blk_mq_stop_hw_queues(skdev->queue);
2874 spin_unlock_irqrestore(&skdev->lock, flags);
2875
2876err_out:
2877 return rc;
2878}
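/*
 * tag_set.cmd_size above reserves per-request driver space directly
 * behind each struct request, which is why skd_init_request() can take
 * "skreq->sg = (void *)(skreq + 1)". The memory blk-mq hands out per
 * request is laid out as:
 *
 *	[ struct request | struct skd_request_context | scatterlist[] ]
 *	                   ^ blk_mq_rq_to_pdu(rq)       ^ skreq + 1
 */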
2879
2880#define SKD_N_DEV_TABLE 16u
2881static u32 skd_next_devno;
2882
2883static struct skd_device *skd_construct(struct pci_dev *pdev)
2884{
2885 struct skd_device *skdev;
2886 int blk_major = skd_major;
2887 size_t size;
2888 int rc;
2889
2890 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
2891
2892 if (!skdev) {
2893 dev_err(&pdev->dev, "memory alloc failure\n");
2894 return NULL;
2895 }
2896
2897 skdev->state = SKD_DRVR_STATE_LOAD;
2898 skdev->pdev = pdev;
2899 skdev->devno = skd_next_devno++;
2900 skdev->major = blk_major;
2901 skdev->dev_max_queue_depth = 0;
2902
2903 skdev->num_req_context = skd_max_queue_depth;
2904 skdev->num_fitmsg_context = skd_max_queue_depth;
2905 skdev->cur_max_queue_depth = 1;
2906 skdev->queue_low_water_mark = 1;
2907 skdev->proto_ver = 99;
2908 skdev->sgs_per_request = skd_sgs_per_request;
2909 skdev->dbg_level = skd_dbg_level;
e67f86b3 2910
2911 spin_lock_init(&skdev->lock);
2912
2913 INIT_WORK(&skdev->start_queue, skd_start_queue);
2914 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
2915
2916 size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
2917 skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
2918 SLAB_HWCACHE_ALIGN, NULL);
2919 if (!skdev->msgbuf_cache)
2920 goto err_out;
2921 WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
2922 "skd-msgbuf: %d < %zd\n",
2923 kmem_cache_size(skdev->msgbuf_cache), size);
2924 size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
2925 skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
2926 SLAB_HWCACHE_ALIGN, NULL);
2927 if (!skdev->sglist_cache)
2928 goto err_out;
2929 WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
2930 "skd-sglist: %d < %zd\n",
2931 kmem_cache_size(skdev->sglist_cache), size);
2932 size = SKD_N_INTERNAL_BYTES;
2933 skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
2934 SLAB_HWCACHE_ALIGN, NULL);
2935 if (!skdev->databuf_cache)
2936 goto err_out;
2937 WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
2938 "skd-databuf: %d < %zd\n",
2939 kmem_cache_size(skdev->databuf_cache), size);
2940
2941 dev_dbg(&skdev->pdev->dev, "skcomp\n");
2942 rc = skd_cons_skcomp(skdev);
2943 if (rc < 0)
2944 goto err_out;
2945
2946 dev_dbg(&skdev->pdev->dev, "skmsg\n");
2947 rc = skd_cons_skmsg(skdev);
2948 if (rc < 0)
2949 goto err_out;
2950
2951 dev_dbg(&skdev->pdev->dev, "sksb\n");
2952 rc = skd_cons_sksb(skdev);
2953 if (rc < 0)
2954 goto err_out;
2955
2956 dev_dbg(&skdev->pdev->dev, "disk\n");
2957 rc = skd_cons_disk(skdev);
2958 if (rc < 0)
2959 goto err_out;
2960
2961 dev_dbg(&skdev->pdev->dev, "VICTORY\n");
2962 return skdev;
2963
2964err_out:
2965 dev_dbg(&skdev->pdev->dev, "construct failed\n");
2966 skd_destruct(skdev);
2967 return NULL;
2968}
2969
2970/*
2971 *****************************************************************************
2972 * DESTRUCT (FREE)
2973 *****************************************************************************
2974 */
2975
2976static void skd_free_skcomp(struct skd_device *skdev)
2977{
2978 if (skdev->skcomp_table)
2979 dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
2980 skdev->skcomp_table, skdev->cq_dma_address);
2981
2982 skdev->skcomp_table = NULL;
2983 skdev->cq_dma_address = 0;
2984}
2985
2986static void skd_free_skmsg(struct skd_device *skdev)
2987{
2988 u32 i;
2989
2990 if (skdev->skmsg_table == NULL)
2991 return;
2992
2993 for (i = 0; i < skdev->num_fitmsg_context; i++) {
2994 struct skd_fitmsg_context *skmsg;
2995
2996 skmsg = &skdev->skmsg_table[i];
2997
2998 if (skmsg->msg_buf != NULL) {
2999 dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
3000 skmsg->msg_buf,
3001 skmsg->mb_dma_address);
3002 }
3003 skmsg->msg_buf = NULL;
3004 skmsg->mb_dma_address = 0;
3005 }
3006
3007 kfree(skdev->skmsg_table);
3008 skdev->skmsg_table = NULL;
3009}
3010
3011static void skd_free_sksb(struct skd_device *skdev)
3012{
3013 struct skd_special_context *skspcl = &skdev->internal_skspcl;
3014
3015 skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
3016 skspcl->db_dma_address, DMA_BIDIRECTIONAL);
3017
3018 skspcl->data_buf = NULL;
3019 skspcl->db_dma_address = 0;
3020
3021 skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
3022 skspcl->mb_dma_address, DMA_TO_DEVICE);
3023
3024 skspcl->msg_buf = NULL;
3025 skspcl->mb_dma_address = 0;
3026
3027 skd_free_sg_list(skdev, skspcl->req.sksg_list,
3028 skspcl->req.sksg_dma_address);
3029
3030 skspcl->req.sksg_list = NULL;
3031 skspcl->req.sksg_dma_address = 0;
3032}
3033
3034static void skd_free_disk(struct skd_device *skdev)
3035{
3036 struct gendisk *disk = skdev->disk;
3037
3038 if (disk && (disk->flags & GENHD_FL_UP))
3039 del_gendisk(disk);
3040
3041 if (skdev->queue) {
3042 blk_cleanup_queue(skdev->queue);
3043 skdev->queue = NULL;
3044 if (disk)
3045 disk->queue = NULL;
3046 }
3047
3048 if (skdev->tag_set.tags)
3049 blk_mq_free_tag_set(&skdev->tag_set);
3050
3051 put_disk(disk);
3052 skdev->disk = NULL;
3053}
3054
3055static void skd_destruct(struct skd_device *skdev)
3056{
3057 if (skdev == NULL)
3058 return;
3059
3060 cancel_work_sync(&skdev->start_queue);
3061
3062 dev_dbg(&skdev->pdev->dev, "disk\n");
3063 skd_free_disk(skdev);
3064
3065 dev_dbg(&skdev->pdev->dev, "sksb\n");
3066 skd_free_sksb(skdev);
3067
3068 dev_dbg(&skdev->pdev->dev, "skmsg\n");
3069 skd_free_skmsg(skdev);
3070
3071 dev_dbg(&skdev->pdev->dev, "skcomp\n");
3072 skd_free_skcomp(skdev);
3073
3074 kmem_cache_destroy(skdev->databuf_cache);
3075 kmem_cache_destroy(skdev->sglist_cache);
3076 kmem_cache_destroy(skdev->msgbuf_cache);
3077
3078 dev_dbg(&skdev->pdev->dev, "skdev\n");
3079 kfree(skdev);
3080}
3081
3082/*
3083 *****************************************************************************
3084 * BLOCK DEVICE (BDEV) GLUE
3085 *****************************************************************************
3086 */
3087
3088static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3089{
3090 struct skd_device *skdev;
3091 u64 capacity;
3092
3093 skdev = bdev->bd_disk->private_data;
3094
3095 dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
3096 bdev->bd_disk->disk_name, current->comm);
3097
3098 if (skdev->read_cap_is_valid) {
3099 capacity = get_capacity(skdev->disk);
3100 geo->heads = 64;
3101 geo->sectors = 255;
3102 geo->cylinders = (capacity) / (255 * 64);
3103
3104 return 0;
3105 }
3106 return -EIO;
3107}
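/*
 * The synthetic geometry above fixes 64 heads and 255 sectors per track,
 * so cylinders = capacity / (255 * 64). A worked example for a device of
 * 1073741824 sectors (512 GiB at 512-byte sectors):
 *
 *	1073741824 / (255 * 64) = 1073741824 / 16320 = 65793 cylinders
 *
 * (integer division; the remaining 64 sectors are not addressable via CHS)
 */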
3108
3109static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
3110{
3111 dev_dbg(&skdev->pdev->dev, "add_disk\n");
3112 device_add_disk(parent, skdev->disk, NULL);
3113 return 0;
3114}
3115
3116static const struct block_device_operations skd_blockdev_ops = {
3117 .owner = THIS_MODULE,
3118 .getgeo = skd_bdev_getgeo,
3119};
3120
3121/*
3122 *****************************************************************************
3123 * PCIe DRIVER GLUE
3124 *****************************************************************************
3125 */
3126
3127static const struct pci_device_id skd_pci_tbl[] = {
3128 { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
3129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
3130 { 0 } /* terminate list */
3131};
3132
3133MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
3134
3135static char *skd_pci_info(struct skd_device *skdev, char *str)
3136{
3137 int pcie_reg;
3138
3139 strcpy(str, "PCIe (");
3140 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
3141
3142 if (pcie_reg) {
3143
3144 char lwstr[6];
3145 uint16_t pcie_lstat, lspeed, lwidth;
3146
3147 pcie_reg += 0x12;
3148 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
3149 lspeed = pcie_lstat & (0xF);
3150 lwidth = (pcie_lstat & 0x3F0) >> 4;
3151
3152 if (lspeed == 1)
3153 strcat(str, "2.5GT/s ");
3154 else if (lspeed == 2)
3155 strcat(str, "5.0GT/s ");
3156 else
3157 strcat(str, "<unknown> ");
3158 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
3159 strcat(str, lwstr);
3160 }
3161 return str;
3162}
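/*
 * The 0x12 added to the capability offset above is the Link Status
 * register (PCI_EXP_LNKSTA) within the PCIe capability. Decoding a sample
 * value with the masks used above:
 *
 *	pcie_lstat = 0x0042;
 *	lspeed = 0x0042 & 0xF;			// 2 -> "5.0GT/s "
 *	lwidth = (0x0042 & 0x3F0) >> 4;		// 4 -> "4X)"
 */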
3163
3164static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3165{
3166 int i;
3167 int rc = 0;
3168 char pci_str[32];
3169 struct skd_device *skdev;
3170
3171 dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
3172 pdev->device);
3173
3174 rc = pci_enable_device(pdev);
3175 if (rc)
3176 return rc;
3177 rc = pci_request_regions(pdev, DRV_NAME);
3178 if (rc)
3179 goto err_out;
3180 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3181 if (rc)
3182 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3183 if (rc) {
3184 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
3185 goto err_out_regions;
3186 }
3187
3188 if (!skd_major) {
3189 rc = register_blkdev(0, DRV_NAME);
3190 if (rc < 0)
3191 goto err_out_regions;
3192 BUG_ON(!rc);
3193 skd_major = rc;
3194 }
3195
3196 skdev = skd_construct(pdev);
3197 if (skdev == NULL) {
3198 rc = -ENOMEM;
3199 goto err_out_regions;
3200 }
3201
3202 skd_pci_info(skdev, pci_str);
3203 dev_info(&pdev->dev, "%s 64bit\n", pci_str);
3204
3205 pci_set_master(pdev);
3206 rc = pci_enable_pcie_error_reporting(pdev);
3207 if (rc) {
3208 dev_err(&pdev->dev,
3209 "bad enable of PCIe error reporting rc=%d\n", rc);
3210 skdev->pcie_error_reporting_is_enabled = 0;
3211 } else
3212 skdev->pcie_error_reporting_is_enabled = 1;
3213
3214 pci_set_drvdata(pdev, skdev);
3215
3216 for (i = 0; i < SKD_MAX_BARS; i++) {
3217 skdev->mem_phys[i] = pci_resource_start(pdev, i);
3218 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
3219 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
3220 skdev->mem_size[i]);
3221 if (!skdev->mem_map[i]) {
3222 dev_err(&pdev->dev,
3223 "Unable to map adapter memory!\n");
3224 rc = -ENODEV;
3225 goto err_out_iounmap;
3226 }
3227 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
3228 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
3229 skdev->mem_size[i]);
3230 }
3231
3232 rc = skd_acquire_irq(skdev);
3233 if (rc) {
3234 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
3235 goto err_out_iounmap;
3236 }
3237
3238 rc = skd_start_timer(skdev);
3239 if (rc)
3240 goto err_out_timer;
3241
3242 init_waitqueue_head(&skdev->waitq);
3243
3244 skd_start_device(skdev);
3245
3246 rc = wait_event_interruptible_timeout(skdev->waitq,
3247 (skdev->gendisk_on),
3248 (SKD_START_WAIT_SECONDS * HZ));
3249 if (skdev->gendisk_on > 0) {
3250 /* device came on-line after reset */
3251 skd_bdev_attach(&pdev->dev, skdev);
3252 rc = 0;
3253 } else {
3254 /* We timed out; something is wrong with the device,
3255 * so don't add the disk structure. */
3256 dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
3257 rc);
3258 /* if there was no error, we timed out, so return -ENXIO */
3259 if (!rc)
3260 rc = -ENXIO;
3261 goto err_out_timer;
3262 }
3263
3264 return rc;
3265
3266err_out_timer:
3267 skd_stop_device(skdev);
3268 skd_release_irq(skdev);
3269
3270err_out_iounmap:
3271 for (i = 0; i < SKD_MAX_BARS; i++)
3272 if (skdev->mem_map[i])
3273 iounmap(skdev->mem_map[i]);
3274
3275 if (skdev->pcie_error_reporting_is_enabled)
3276 pci_disable_pcie_error_reporting(pdev);
3277
3278 skd_destruct(skdev);
3279
3280err_out_regions:
3281 pci_release_regions(pdev);
3282
3283err_out:
3284 pci_disable_device(pdev);
3285 pci_set_drvdata(pdev, NULL);
3286 return rc;
3287}
3288
3289static void skd_pci_remove(struct pci_dev *pdev)
3290{
3291 int i;
3292 struct skd_device *skdev;
3293
3294 skdev = pci_get_drvdata(pdev);
3295 if (!skdev) {
3296 dev_err(&pdev->dev, "no device data for PCI\n");
3297 return;
3298 }
3299 skd_stop_device(skdev);
3300 skd_release_irq(skdev);
3301
3302 for (i = 0; i < SKD_MAX_BARS; i++)
3303 if (skdev->mem_map[i])
3304 iounmap(skdev->mem_map[i]);
3305
3306 if (skdev->pcie_error_reporting_is_enabled)
3307 pci_disable_pcie_error_reporting(pdev);
3308
3309 skd_destruct(skdev);
3310
3311 pci_release_regions(pdev);
3312 pci_disable_device(pdev);
3313 pci_set_drvdata(pdev, NULL);
3314
3315 return;
3316}
3317
3318static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3319{
3320 int i;
3321 struct skd_device *skdev;
3322
3323 skdev = pci_get_drvdata(pdev);
3324 if (!skdev) {
3325 dev_err(&pdev->dev, "no device data for PCI\n");
3326 return -EIO;
3327 }
3328
3329 skd_stop_device(skdev);
3330
3331 skd_release_irq(skdev);
3332
3333 for (i = 0; i < SKD_MAX_BARS; i++)
3334 if (skdev->mem_map[i])
3335 iounmap(skdev->mem_map[i]);
3336
3337 if (skdev->pcie_error_reporting_is_enabled)
3338 pci_disable_pcie_error_reporting(pdev);
3339
3340 pci_release_regions(pdev);
3341 pci_save_state(pdev);
3342 pci_disable_device(pdev);
3343 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3344 return 0;
3345}
3346
3347static int skd_pci_resume(struct pci_dev *pdev)
3348{
3349 int i;
3350 int rc = 0;
3351 struct skd_device *skdev;
3352
3353 skdev = pci_get_drvdata(pdev);
3354 if (!skdev) {
3355 dev_err(&pdev->dev, "no device data for PCI\n");
3356 return -1;
3357 }
3358
3359 pci_set_power_state(pdev, PCI_D0);
3360 pci_enable_wake(pdev, PCI_D0, 0);
3361 pci_restore_state(pdev);
3362
3363 rc = pci_enable_device(pdev);
3364 if (rc)
3365 return rc;
3366 rc = pci_request_regions(pdev, DRV_NAME);
3367 if (rc)
3368 goto err_out;
13812621
CH
3369 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3370 if (rc)
3371 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3372 if (rc) {
3373 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
3374 goto err_out_regions;
3375 }
3376
3377 pci_set_master(pdev);
3378 rc = pci_enable_pcie_error_reporting(pdev);
3379 if (rc) {
3380 dev_err(&pdev->dev,
3381 "bad enable of PCIe error reporting rc=%d\n", rc);
3382 skdev->pcie_error_reporting_is_enabled = 0;
3383 } else
3384 skdev->pcie_error_reporting_is_enabled = 1;
3385
3386 for (i = 0; i < SKD_MAX_BARS; i++) {
3387
3388 skdev->mem_phys[i] = pci_resource_start(pdev, i);
3389 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
3390 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
3391 skdev->mem_size[i]);
3392 if (!skdev->mem_map[i]) {
3393 dev_err(&pdev->dev, "Unable to map adapter memory!\n");
3394 rc = -ENODEV;
3395 goto err_out_iounmap;
3396 }
3397 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
3398 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
3399 skdev->mem_size[i]);
3400 }
3401 rc = skd_acquire_irq(skdev);
3402 if (rc) {
3403 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
3404 goto err_out_iounmap;
3405 }
3406
3407 rc = skd_start_timer(skdev);
3408 if (rc)
3409 goto err_out_timer;
3410
3411 init_waitqueue_head(&skdev->waitq);
3412
3413 skd_start_device(skdev);
3414
3415 return rc;
3416
3417err_out_timer:
3418 skd_stop_device(skdev);
3419 skd_release_irq(skdev);
3420
3421err_out_iounmap:
3422 for (i = 0; i < SKD_MAX_BARS; i++)
3423 if (skdev->mem_map[i])
3424 iounmap(skdev->mem_map[i]);
3425
3426 if (skdev->pcie_error_reporting_is_enabled)
3427 pci_disable_pcie_error_reporting(pdev);
3428
3429err_out_regions:
3430 pci_release_regions(pdev);
3431
3432err_out:
3433 pci_disable_device(pdev);
3434 return rc;
3435}
3436
3437static void skd_pci_shutdown(struct pci_dev *pdev)
3438{
3439 struct skd_device *skdev;
3440
3441 dev_err(&pdev->dev, "%s called\n", __func__);
3442
3443 skdev = pci_get_drvdata(pdev);
3444 if (!skdev) {
3445 dev_err(&pdev->dev, "no device data for PCI\n");
3446 return;
3447 }
3448
3449 dev_err(&pdev->dev, "calling stop\n");
3450 skd_stop_device(skdev);
3451}
3452
3453static struct pci_driver skd_driver = {
3454 .name = DRV_NAME,
3455 .id_table = skd_pci_tbl,
3456 .probe = skd_pci_probe,
3457 .remove = skd_pci_remove,
3458 .suspend = skd_pci_suspend,
3459 .resume = skd_pci_resume,
3460 .shutdown = skd_pci_shutdown,
3461};
3462
3463/*
3464 *****************************************************************************
3465 * LOGGING SUPPORT
3466 *****************************************************************************
3467 */
3468
3469const char *skd_drive_state_to_str(int state)
3470{
3471 switch (state) {
3472 case FIT_SR_DRIVE_OFFLINE:
3473 return "OFFLINE";
3474 case FIT_SR_DRIVE_INIT:
3475 return "INIT";
3476 case FIT_SR_DRIVE_ONLINE:
3477 return "ONLINE";
3478 case FIT_SR_DRIVE_BUSY:
3479 return "BUSY";
3480 case FIT_SR_DRIVE_FAULT:
3481 return "FAULT";
3482 case FIT_SR_DRIVE_DEGRADED:
3483 return "DEGRADED";
3484 case FIT_SR_PCIE_LINK_DOWN:
3485 return "INK_DOWN";
3486 case FIT_SR_DRIVE_SOFT_RESET:
3487 return "SOFT_RESET";
3488 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3489 return "NEED_FW";
3490 case FIT_SR_DRIVE_INIT_FAULT:
3491 return "INIT_FAULT";
3492 case FIT_SR_DRIVE_BUSY_SANITIZE:
3493 return "BUSY_SANITIZE";
3494 case FIT_SR_DRIVE_BUSY_ERASE:
3495 return "BUSY_ERASE";
3496 case FIT_SR_DRIVE_FW_BOOTING:
3497 return "FW_BOOTING";
3498 default:
3499 return "???";
3500 }
3501}
3502
3503const char *skd_skdev_state_to_str(enum skd_drvr_state state)
3504{
3505 switch (state) {
3506 case SKD_DRVR_STATE_LOAD:
3507 return "LOAD";
3508 case SKD_DRVR_STATE_IDLE:
3509 return "IDLE";
3510 case SKD_DRVR_STATE_BUSY:
3511 return "BUSY";
3512 case SKD_DRVR_STATE_STARTING:
3513 return "STARTING";
3514 case SKD_DRVR_STATE_ONLINE:
3515 return "ONLINE";
3516 case SKD_DRVR_STATE_PAUSING:
3517 return "PAUSING";
3518 case SKD_DRVR_STATE_PAUSED:
3519 return "PAUSED";
3520 case SKD_DRVR_STATE_RESTARTING:
3521 return "RESTARTING";
3522 case SKD_DRVR_STATE_RESUMING:
3523 return "RESUMING";
3524 case SKD_DRVR_STATE_STOPPING:
3525 return "STOPPING";
3526 case SKD_DRVR_STATE_SYNCING:
3527 return "SYNCING";
3528 case SKD_DRVR_STATE_FAULT:
3529 return "FAULT";
3530 case SKD_DRVR_STATE_DISAPPEARED:
3531 return "DISAPPEARED";
3532 case SKD_DRVR_STATE_BUSY_ERASE:
3533 return "BUSY_ERASE";
3534 case SKD_DRVR_STATE_BUSY_SANITIZE:
3535 return "BUSY_SANITIZE";
3536 case SKD_DRVR_STATE_BUSY_IMMINENT:
3537 return "BUSY_IMMINENT";
3538 case SKD_DRVR_STATE_WAIT_BOOT:
3539 return "WAIT_BOOT";
3540
3541 default:
3542 return "???";
3543 }
3544}
3545
3546static const char *skd_skreq_state_to_str(enum skd_req_state state)
3547{
3548 switch (state) {
3549 case SKD_REQ_STATE_IDLE:
3550 return "IDLE";
3551 case SKD_REQ_STATE_SETUP:
3552 return "SETUP";
3553 case SKD_REQ_STATE_BUSY:
3554 return "BUSY";
3555 case SKD_REQ_STATE_COMPLETED:
3556 return "COMPLETED";
3557 case SKD_REQ_STATE_TIMEOUT:
3558 return "TIMEOUT";
3559 default:
3560 return "???";
3561 }
3562}
3563
3564static void skd_log_skdev(struct skd_device *skdev, const char *event)
3565{
3566 dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
3567 dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
3568 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3569 skd_skdev_state_to_str(skdev->state), skdev->state);
3570 dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
3571 skd_in_flight(skdev), skdev->cur_max_queue_depth,
3572 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3573 dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n",
3574 skdev->skcomp_cycle, skdev->skcomp_ix);
3575}
3576
3577static void skd_log_skreq(struct skd_device *skdev,
3578 struct skd_request_context *skreq, const char *event)
3579{
3580 struct request *req = blk_mq_rq_from_pdu(skreq);
3581 u32 lba = blk_rq_pos(req);
3582 u32 count = blk_rq_sectors(req);
3583
3584 dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
3585 dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
3586 skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
3587 skreq->fitmsg_id);
3588 dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n",
3589 skreq->data_dir, skreq->n_sg);
3590
3591 dev_dbg(&skdev->pdev->dev,
3592 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
3593 count, count, (int)rq_data_dir(req));
3594}
3595
3596/*
3597 *****************************************************************************
3598 * MODULE GLUE
3599 *****************************************************************************
3600 */
3601
3602static int __init skd_init(void)
3603{
3604 BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
3605 BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
3606 BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
3607 BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
3608 BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
3609 BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
3610 BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
3611 BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);
3612
3613 switch (skd_isr_type) {
3614 case SKD_IRQ_LEGACY:
3615 case SKD_IRQ_MSI:
3616 case SKD_IRQ_MSIX:
3617 break;
3618 default:
3619 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
3620 skd_isr_type, SKD_IRQ_DEFAULT);
3621 skd_isr_type = SKD_IRQ_DEFAULT;
3622 }
3623
3624 if (skd_max_queue_depth < 1 ||
3625 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
3626 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
3627 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
3628 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
3629 }
3630
3631 if (skd_max_req_per_msg < 1 ||
3632 skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
3633 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
3634 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
3635 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
3636 }
3637
3638 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
3639 pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
3640 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
3641 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
3642 }
3643
3644 if (skd_dbg_level < 0 || skd_dbg_level > 2) {
3645 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
3646 skd_dbg_level, 0);
3647 skd_dbg_level = 0;
3648 }
3649
3650 if (skd_isr_comp_limit < 0) {
3651 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
3652 skd_isr_comp_limit, 0);
3653 skd_isr_comp_limit = 0;
3654 }
3655
3656 return pci_register_driver(&skd_driver);
3657}
3658
3659static void __exit skd_exit(void)
3660{
3661 pci_unregister_driver(&skd_driver);
3662
3663 if (skd_major)
3664 unregister_blkdev(skd_major, DRV_NAME);
3665}
3666
3667module_init(skd_init);
3668module_exit(skd_exit);