1/* Copyright 2012 STEC, Inc.
2 *
3 * This file is licensed under the terms of the 3-clause
4 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
5 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
6 * at your option. Both licenses are also available in the LICENSE file
7 * distributed with this project. This file may not be copied, modified,
8 * or distributed except in accordance with those terms.
9 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
10 * Initial Driver Design!
11 * Thomas Swann <tswann@stec-inc.com>
12 * Interrupt handling.
13 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
14 * biomode implementation.
15 * Akhil Bhansali <abhansali@stec-inc.com>
16 * Added support for DISCARD / FLUSH and FUA.
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/pci.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25#include <linux/blkdev.h>
26#include <linux/sched.h>
27#include <linux/interrupt.h>
28#include <linux/compiler.h>
29#include <linux/workqueue.h>
30#include <linux/bitops.h>
31#include <linux/delay.h>
32#include <linux/time.h>
33#include <linux/hdreg.h>
34#include <linux/dma-mapping.h>
35#include <linux/completion.h>
36#include <linux/scatterlist.h>
37#include <linux/version.h>
38#include <linux/err.h>
40#include <linux/aer.h>
41#include <linux/ctype.h>
42#include <linux/wait.h>
43#include <linux/uio.h>
44#include <scsi/scsi.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_tcq.h>
47#include <scsi/scsi_cmnd.h>
48#include <scsi/sg.h>
49#include <linux/io.h>
50#include <linux/uaccess.h>
51#include <asm-generic/unaligned.h>
52
53#include "skd_s1120.h"
54
55static int skd_dbg_level;
56static int skd_isr_comp_limit = 4;
57
58enum {
59 STEC_LINK_2_5GTS = 0,
60 STEC_LINK_5GTS = 1,
61 STEC_LINK_8GTS = 2,
62 STEC_LINK_UNKNOWN = 0xFF
63};
64
65enum {
66 SKD_FLUSH_INITIALIZER,
67 SKD_FLUSH_ZERO_SIZE_FIRST,
68 SKD_FLUSH_DATA_SECOND,
69};
70
71#define SKD_ASSERT(expr) \
72 do { \
73 if (unlikely(!(expr))) { \
74 pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
75 # expr, __FILE__, __func__, __LINE__); \
76 } \
77 } while (0)
78
79#define DRV_NAME "skd"
80#define DRV_VERSION "2.2.1"
81#define DRV_BUILD_ID "0260"
82#define PFX DRV_NAME ": "
83#define DRV_BIN_VERSION 0x100
84#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
85
86MODULE_AUTHOR("bug-reports: support@stec-inc.com");
87MODULE_LICENSE("Dual BSD/GPL");
88
89MODULE_DESCRIPTION("STEC s1120 PCIe SSD block/BIO driver (b" DRV_BUILD_ID ")");
90MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
91
92#define PCI_VENDOR_ID_STEC 0x1B39
93#define PCI_DEVICE_ID_S1120 0x0001
94
95#define SKD_FUA_NV (1 << 1)
96#define SKD_MINORS_PER_DEVICE 16
97
98#define SKD_MAX_QUEUE_DEPTH 200u
99
100#define SKD_PAUSE_TIMEOUT (5 * 1000)
101
102#define SKD_N_FITMSG_BYTES (512u)
103
104#define SKD_N_SPECIAL_CONTEXT 32u
105#define SKD_N_SPECIAL_FITMSG_BYTES (128u)
106
107/* SG elements are 32 bytes, so we can make this 4096 and still be under the
108 * 128KB limit. That allows 4096*4K = 16M xfer size
109 */
110#define SKD_N_SG_PER_REQ_DEFAULT 256u
111#define SKD_N_SG_PER_SPECIAL 256u
112
113#define SKD_N_COMPLETION_ENTRY 256u
114#define SKD_N_READ_CAP_BYTES (8u)
115
116#define SKD_N_INTERNAL_BYTES (512u)
117
118/* 5 bits of uniquifier, 0xF800 */
119#define SKD_ID_INCR (0x400)
120#define SKD_ID_TABLE_MASK (3u << 8u)
121#define SKD_ID_RW_REQUEST (0u << 8u)
122#define SKD_ID_INTERNAL (1u << 8u)
123#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
124#define SKD_ID_FIT_MSG (3u << 8u)
125#define SKD_ID_SLOT_MASK 0x00FFu
126#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
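/*
 * Note: an id encodes its owning table in bits 8-9 and the slot index in
 * the low byte; the bits above SKD_ID_SLOT_AND_TABLE_MASK act as the
 * uniquifier and are bumped by SKD_ID_INCR each time a context is reused
 * (the free-list code asserts (id & SKD_ID_INCR) == 0 on idle contexts).
 */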
127
128#define SKD_N_TIMEOUT_SLOT 4u
129#define SKD_TIMEOUT_SLOT_MASK 3u
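/*
 * Note: in-flight requests are counted per timeout slot, indexed by the
 * low bits of skdev->timeout_stamp. skd_timer_tick() advances the stamp
 * roughly once a second; a slot that is still non-empty when its index
 * comes around again is treated as holding overdue I/O.
 */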
130
131#define SKD_N_MAX_SECTORS 2048u
132
133#define SKD_MAX_RETRIES 2u
134
135#define SKD_TIMER_SECONDS(seconds) (seconds)
136#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
137
138#define INQ_STD_NBYTES 36
139#define SKD_DISCARD_CDB_LENGTH 24
140
141enum skd_drvr_state {
142 SKD_DRVR_STATE_LOAD,
143 SKD_DRVR_STATE_IDLE,
144 SKD_DRVR_STATE_BUSY,
145 SKD_DRVR_STATE_STARTING,
146 SKD_DRVR_STATE_ONLINE,
147 SKD_DRVR_STATE_PAUSING,
148 SKD_DRVR_STATE_PAUSED,
149 SKD_DRVR_STATE_DRAINING_TIMEOUT,
150 SKD_DRVR_STATE_RESTARTING,
151 SKD_DRVR_STATE_RESUMING,
152 SKD_DRVR_STATE_STOPPING,
153 SKD_DRVR_STATE_FAULT,
154 SKD_DRVR_STATE_DISAPPEARED,
155 SKD_DRVR_STATE_PROTOCOL_MISMATCH,
156 SKD_DRVR_STATE_BUSY_ERASE,
157 SKD_DRVR_STATE_BUSY_SANITIZE,
158 SKD_DRVR_STATE_BUSY_IMMINENT,
159 SKD_DRVR_STATE_WAIT_BOOT,
160 SKD_DRVR_STATE_SYNCING,
161};
162
163#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
164#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
165#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
166#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
167#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
168#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
169#define SKD_START_WAIT_SECONDS 90u
170
171enum skd_req_state {
172 SKD_REQ_STATE_IDLE,
173 SKD_REQ_STATE_SETUP,
174 SKD_REQ_STATE_BUSY,
175 SKD_REQ_STATE_COMPLETED,
176 SKD_REQ_STATE_TIMEOUT,
177 SKD_REQ_STATE_ABORTED,
178};
179
180enum skd_fit_msg_state {
181 SKD_MSG_STATE_IDLE,
182 SKD_MSG_STATE_BUSY,
183};
184
185enum skd_check_status_action {
186 SKD_CHECK_STATUS_REPORT_GOOD,
187 SKD_CHECK_STATUS_REPORT_SMART_ALERT,
188 SKD_CHECK_STATUS_REQUEUE_REQUEST,
189 SKD_CHECK_STATUS_REPORT_ERROR,
190 SKD_CHECK_STATUS_BUSY_IMMINENT,
191};
192
193struct skd_fitmsg_context {
194 enum skd_fit_msg_state state;
195
196 struct skd_fitmsg_context *next;
197
198 u32 id;
199 u16 outstanding;
200
201 u32 length;
202 u32 offset;
203
204 u8 *msg_buf;
205 dma_addr_t mb_dma_address;
206};
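/*
 * Note: msg_buf/mb_dma_address hold a FIT message: a struct fit_msg_hdr
 * followed by the SoFIT SCSI requests coalesced into it. skd_request_fn()
 * packs up to skd_max_req_per_msg requests into one message before
 * passing it to skd_send_fitmsg().
 */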
207
208struct skd_request_context {
209 enum skd_req_state state;
210
211 struct skd_request_context *next;
212
213 u16 id;
214 u32 fitmsg_id;
215
216 struct request *req;
217 struct bio *bio;
218 unsigned long start_time;
219 u8 flush_cmd;
220 u8 discard_page;
221
222 u32 timeout_stamp;
223 u8 sg_data_dir;
224 struct scatterlist *sg;
225 u32 n_sg;
226 u32 sg_byte_count;
227
228 struct fit_sg_descriptor *sksg_list;
229 dma_addr_t sksg_dma_address;
230
231 struct fit_completion_entry_v1 completion;
232
233 struct fit_comp_error_info err_info;
234
235};
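/*
 * Note: a skd_request_context tracks one block-layer request (or bio in
 * bio mode) from the skreq_free_list through SKD_REQ_STATE_BUSY to
 * completion. sksg_list/sksg_dma_address is the device-visible FIT SG
 * descriptor list built from the host scatterlist in sg[].
 */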
236#define SKD_DATA_DIR_HOST_TO_CARD 1
237#define SKD_DATA_DIR_CARD_TO_HOST 2
238#define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */
239
240struct skd_special_context {
241 struct skd_request_context req;
242
243 u8 orphaned;
244
245 void *data_buf;
246 dma_addr_t db_dma_address;
247
248 u8 *msg_buf;
249 dma_addr_t mb_dma_address;
250};
251
252struct skd_sg_io {
253 fmode_t mode;
254 void __user *argp;
255
256 struct sg_io_hdr sg;
257
258 u8 cdb[16];
259
260 u32 dxfer_len;
261 u32 iovcnt;
262 struct sg_iovec *iov;
263 struct sg_iovec no_iov_iov;
264
265 struct skd_special_context *skspcl;
266};
267
268typedef enum skd_irq_type {
269 SKD_IRQ_LEGACY,
270 SKD_IRQ_MSI,
271 SKD_IRQ_MSIX
272} skd_irq_type_t;
273
274#define SKD_MAX_BARS 2
275
276struct skd_device {
277 volatile void __iomem *mem_map[SKD_MAX_BARS];
278 resource_size_t mem_phys[SKD_MAX_BARS];
279 u32 mem_size[SKD_MAX_BARS];
280
281 skd_irq_type_t irq_type;
282 u32 msix_count;
283 struct skd_msix_entry *msix_entries;
284
285 struct pci_dev *pdev;
286 int pcie_error_reporting_is_enabled;
287
288 spinlock_t lock;
289 struct gendisk *disk;
290 struct request_queue *queue;
291 struct device *class_dev;
292 int gendisk_on;
293 int sync_done;
294
295 atomic_t device_count;
296 u32 devno;
297 u32 major;
298 char name[32];
299 char isr_name[30];
300
301 enum skd_drvr_state state;
302 u32 drive_state;
303
304 u32 in_flight;
305 u32 cur_max_queue_depth;
306 u32 queue_low_water_mark;
307 u32 dev_max_queue_depth;
308
309 u32 num_fitmsg_context;
310 u32 num_req_context;
311
312 u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
313 u32 timeout_stamp;
314 struct skd_fitmsg_context *skmsg_free_list;
315 struct skd_fitmsg_context *skmsg_table;
316
317 struct skd_request_context *skreq_free_list;
318 struct skd_request_context *skreq_table;
319
320 struct skd_special_context *skspcl_free_list;
321 struct skd_special_context *skspcl_table;
322
323 struct skd_special_context internal_skspcl;
324 u32 read_cap_blocksize;
325 u32 read_cap_last_lba;
326 int read_cap_is_valid;
327 int inquiry_is_valid;
328 u8 inq_serial_num[13]; /*12 chars plus null term */
329 u8 id_str[80]; /* holds a composite name (pci + sernum) */
330
331 u8 skcomp_cycle;
332 u32 skcomp_ix;
333 struct fit_completion_entry_v1 *skcomp_table;
334 struct fit_comp_error_info *skerr_table;
335 dma_addr_t cq_dma_address;
336
337 wait_queue_head_t waitq;
338
339 struct timer_list timer;
340 u32 timer_countdown;
341 u32 timer_substate;
342
343 int n_special;
344 int sgs_per_request;
345 u32 last_mtd;
346
347 u32 proto_ver;
348
349 int dbg_level;
350 u32 connect_time_stamp;
351 int connect_retries;
352#define SKD_MAX_CONNECT_RETRIES 16
353 u32 drive_jiffies;
354
355 u32 timo_slot;
356
357
358 struct work_struct completion_worker;
359
360 struct bio_list bio_queue;
361 int queue_stopped;
362
363 struct list_head flush_list;
364};
365
366#define SKD_FLUSH_JOB "skd-flush-jobs"
367struct kmem_cache *skd_flush_slab;
368
369/*
370 * These commands hold "nonzero size FLUSH bios",
371 * which are enqueued in skdev->flush_list during
372 * completion of "zero size FLUSH commands".
373 * This list is only used in bio mode.
374 */
375struct skd_flush_cmd {
376 void *cmd;
377 struct list_head flist;
378};
379
380#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
381#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
382#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
383
384static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
385{
386 u32 val;
387
388 if (likely(skdev->dbg_level < 2))
389 return readl(skdev->mem_map[1] + offset);
390 else {
391 barrier();
392 val = readl(skdev->mem_map[1] + offset);
393 barrier();
394 pr_debug("%s:%s:%d offset %x = %x\n",
395 skdev->name, __func__, __LINE__, offset, val);
396 return val;
397 }
398
399}
400
401static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
402 u32 offset)
403{
404 if (likely(skdev->dbg_level < 2)) {
405 writel(val, skdev->mem_map[1] + offset);
406 barrier();
407 } else {
408 barrier();
409 writel(val, skdev->mem_map[1] + offset);
410 barrier();
411 pr_debug("%s:%s:%d offset %x = %x\n",
412 skdev->name, __func__, __LINE__, offset, val);
413 }
414}
415
416static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
417 u32 offset)
418{
419 if (likely(skdev->dbg_level < 2)) {
420 writeq(val, skdev->mem_map[1] + offset);
421 barrier();
422 } else {
423 barrier();
424 writeq(val, skdev->mem_map[1] + offset);
425 barrier();
426 pr_debug("%s:%s:%d offset %x = %016llx\n",
427 skdev->name, __func__, __LINE__, offset, val);
428 }
429}
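/*
 * Note: all register accessors above go through the second mapped BAR
 * (mem_map[1]). When dbg_level >= 2 they add extra barriers and trace
 * every access with pr_debug.
 */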
430
431
432#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
433static int skd_isr_type = SKD_IRQ_DEFAULT;
434
435module_param(skd_isr_type, int, 0444);
436MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
437 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
438
439#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
440static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
441
442module_param(skd_max_req_per_msg, int, 0444);
443MODULE_PARM_DESC(skd_max_req_per_msg,
444 "Maximum SCSI requests packed in a single message."
445 " (1-14, default==1)");
446
447#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
448#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
449static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
450
451module_param(skd_max_queue_depth, int, 0444);
452MODULE_PARM_DESC(skd_max_queue_depth,
453 "Maximum SCSI requests issued to s1120."
454 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
455
456static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
457module_param(skd_sgs_per_request, int, 0444);
458MODULE_PARM_DESC(skd_sgs_per_request,
459 "Maximum SG elements per block request."
460 " (1-4096, default==256)");
461
462static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
463module_param(skd_max_pass_thru, int, 0444);
464MODULE_PARM_DESC(skd_max_pass_thru,
465 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
466
467module_param(skd_dbg_level, int, 0444);
468MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
469
470module_param(skd_isr_comp_limit, int, 0444);
471MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
472
473static int skd_bio;
474module_param(skd_bio, int, 0444);
475MODULE_PARM_DESC(skd_bio,
476 "Register as a bio device instead of block (0, 1) default=0");
477
478/* Major device number dynamically assigned. */
479static u32 skd_major;
480
481static struct skd_device *skd_construct(struct pci_dev *pdev);
482static void skd_destruct(struct skd_device *skdev);
483static const struct block_device_operations skd_blockdev_ops;
484static void skd_send_fitmsg(struct skd_device *skdev,
485 struct skd_fitmsg_context *skmsg);
486static void skd_send_special_fitmsg(struct skd_device *skdev,
487 struct skd_special_context *skspcl);
488static void skd_request_fn(struct request_queue *rq);
489static void skd_end_request(struct skd_device *skdev,
490 struct skd_request_context *skreq, int error);
491static int skd_preop_sg_list(struct skd_device *skdev,
492 struct skd_request_context *skreq);
493static void skd_postop_sg_list(struct skd_device *skdev,
494 struct skd_request_context *skreq);
495
496static void skd_restart_device(struct skd_device *skdev);
497static int skd_quiesce_dev(struct skd_device *skdev);
498static int skd_unquiesce_dev(struct skd_device *skdev);
499static void skd_release_special(struct skd_device *skdev,
500 struct skd_special_context *skspcl);
501static void skd_disable_interrupts(struct skd_device *skdev);
502static void skd_isr_fwstate(struct skd_device *skdev);
503static void skd_recover_requests(struct skd_device *skdev, int requeue);
504static void skd_soft_reset(struct skd_device *skdev);
505
506static const char *skd_name(struct skd_device *skdev);
507const char *skd_drive_state_to_str(int state);
508const char *skd_skdev_state_to_str(enum skd_drvr_state state);
509static void skd_log_skdev(struct skd_device *skdev, const char *event);
510static void skd_log_skmsg(struct skd_device *skdev,
511 struct skd_fitmsg_context *skmsg, const char *event);
512static void skd_log_skreq(struct skd_device *skdev,
513 struct skd_request_context *skreq, const char *event);
514
515/* FLUSH FUA flag handling. */
516static int skd_flush_cmd_enqueue(struct skd_device *, void *);
517static void *skd_flush_cmd_dequeue(struct skd_device *);
518
519
520/*
521 *****************************************************************************
522 * READ/WRITE REQUESTS
523 *****************************************************************************
524 */
525static void skd_stop_queue(struct skd_device *skdev)
526{
527 if (!skd_bio)
528 blk_stop_queue(skdev->queue);
529 else
530 skdev->queue_stopped = 1;
531}
532
533static void skd_unstop_queue(struct skd_device *skdev)
534{
535 if (!skd_bio)
536 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
537 else
538 skdev->queue_stopped = 0;
539}
540
541static void skd_start_queue(struct skd_device *skdev)
542{
543 if (!skd_bio) {
544 blk_start_queue(skdev->queue);
545 } else {
546 pr_err("(%s): Starting queue\n", skd_name(skdev));
547 skdev->queue_stopped = 0;
548 skd_request_fn(skdev->queue);
549 }
550}
551
552static int skd_queue_stopped(struct skd_device *skdev)
553{
554 if (!skd_bio)
555 return blk_queue_stopped(skdev->queue);
556 else
557 return skdev->queue_stopped;
558}
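/*
 * Note: the queue helpers above dispatch between the block-layer request
 * queue and the driver's internal bio_queue, depending on the skd_bio
 * module parameter.
 */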
559
560static void skd_fail_all_pending_blk(struct skd_device *skdev)
561{
562 struct request_queue *q = skdev->queue;
563 struct request *req;
564
565 for (;; ) {
566 req = blk_peek_request(q);
567 if (req == NULL)
568 break;
569 blk_start_request(req);
570 __blk_end_request_all(req, -EIO);
571 }
572}
573
574static void skd_fail_all_pending_bio(struct skd_device *skdev)
575{
576 struct bio *bio;
577 int error = -EIO;
578
579 for (;; ) {
580 bio = bio_list_pop(&skdev->bio_queue);
581
582 if (bio == NULL)
583 break;
584
585 bio_endio(bio, error);
586 }
587}
588
589static void skd_fail_all_pending(struct skd_device *skdev)
590{
591 if (!skd_bio)
592 skd_fail_all_pending_blk(skdev);
593 else
594 skd_fail_all_pending_bio(skdev);
595}
596
597static void skd_make_request(struct request_queue *q, struct bio *bio)
598{
599 struct skd_device *skdev = q->queuedata;
600 unsigned long flags;
601
602 spin_lock_irqsave(&skdev->lock, flags);
603
604 bio_list_add(&skdev->bio_queue, bio);
605 skd_request_fn(skdev->queue);
606
607 spin_unlock_irqrestore(&skdev->lock, flags);
608}
609
610static void
611skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
612 int data_dir, unsigned lba,
613 unsigned count)
614{
615 if (data_dir == READ)
616 scsi_req->cdb[0] = 0x28;
617 else
618 scsi_req->cdb[0] = 0x2a;
619
620 scsi_req->cdb[1] = 0;
621 scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
622 scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
623 scsi_req->cdb[4] = (lba & 0xff00) >> 8;
624 scsi_req->cdb[5] = (lba & 0xff);
625 scsi_req->cdb[6] = 0;
626 scsi_req->cdb[7] = (count & 0xff00) >> 8;
627 scsi_req->cdb[8] = count & 0xff;
628 scsi_req->cdb[9] = 0;
629}
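/*
 * Note: the CDB built above is a SCSI READ(10)/WRITE(10) (opcode
 * 0x28/0x2a) with a big-endian LBA in bytes 2-5 and a big-endian
 * transfer length in bytes 7-8.
 */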
630
631static void
632skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
633 struct skd_request_context *skreq)
634{
635 skreq->flush_cmd = 1;
636
637 scsi_req->cdb[0] = 0x35;
638 scsi_req->cdb[1] = 0;
639 scsi_req->cdb[2] = 0;
640 scsi_req->cdb[3] = 0;
641 scsi_req->cdb[4] = 0;
642 scsi_req->cdb[5] = 0;
643 scsi_req->cdb[6] = 0;
644 scsi_req->cdb[7] = 0;
645 scsi_req->cdb[8] = 0;
646 scsi_req->cdb[9] = 0;
647}
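/*
 * Note: opcode 0x35 is SYNCHRONIZE CACHE(10); with LBA and block count
 * left at zero it asks the drive to flush its entire cache.
 */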
648
649static void
650skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
651 struct skd_request_context *skreq,
652 struct page *page,
653 u32 lba, u32 count)
654{
655 char *buf;
656 unsigned long len;
657 struct request *req;
658
659 buf = page_address(page);
660 len = SKD_DISCARD_CDB_LENGTH;
661
662 scsi_req->cdb[0] = UNMAP;
663 scsi_req->cdb[8] = len;
664
665 put_unaligned_be16(6 + 16, &buf[0]);
666 put_unaligned_be16(16, &buf[2]);
667 put_unaligned_be64(lba, &buf[8]);
668 put_unaligned_be32(count, &buf[16]);
669
670 if (!skd_bio) {
671 req = skreq->req;
672 blk_add_request_payload(req, page, len);
673 req->buffer = buf;
674 } else {
675 skreq->bio->bi_io_vec->bv_page = page;
676 skreq->bio->bi_io_vec->bv_offset = 0;
677 skreq->bio->bi_io_vec->bv_len = len;
678
679 skreq->bio->bi_vcnt = 1;
680 skreq->bio->bi_phys_segments = 1;
681 }
682}
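/*
 * Note: the discard is sent as a SCSI UNMAP command. The payload page
 * carries an UNMAP parameter list with one 16-byte block descriptor
 * (64-bit LBA plus 32-bit block count), and cdb[8] holds the parameter
 * list length.
 */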
683
684static void skd_request_fn_not_online(struct request_queue *q);
685
686static void skd_request_fn(struct request_queue *q)
687{
688 struct skd_device *skdev = q->queuedata;
689 struct skd_fitmsg_context *skmsg = NULL;
690 struct fit_msg_hdr *fmh = NULL;
691 struct skd_request_context *skreq;
692 struct request *req = NULL;
693 struct bio *bio = NULL;
694 struct skd_scsi_request *scsi_req;
695 struct page *page;
696 unsigned long io_flags;
697 int error;
698 u32 lba;
699 u32 count;
700 int data_dir;
701 u32 be_lba;
702 u32 be_count;
703 u64 be_dmaa;
704 u64 cmdctxt;
705 u32 timo_slot;
706 void *cmd_ptr;
707 int flush, fua;
708
709 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
710 skd_request_fn_not_online(q);
711 return;
712 }
713
714 if (skd_queue_stopped(skdev)) {
715 if (skdev->skmsg_free_list == NULL ||
716 skdev->skreq_free_list == NULL ||
717 skdev->in_flight >= skdev->queue_low_water_mark)
718 /* There is still some kind of shortage */
719 return;
720
721 skd_unstop_queue(skdev);
722 }
723
724 /*
725 * Stop conditions:
726 * - There are no more native requests
727 * - There are already the maximum number of requests in progress
728 * - There are no more skd_request_context entries
729 * - There are no more FIT msg buffers
730 */
731 for (;; ) {
732
733 flush = fua = 0;
734
735 if (!skd_bio) {
736 req = blk_peek_request(q);
737
738 /* Are there any native requests to start? */
739 if (req == NULL)
740 break;
741
742 lba = (u32)blk_rq_pos(req);
743 count = blk_rq_sectors(req);
744 data_dir = rq_data_dir(req);
745 io_flags = req->cmd_flags;
746
747 if (io_flags & REQ_FLUSH)
748 flush++;
749
750 if (io_flags & REQ_FUA)
751 fua++;
752
753 pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
754 "count=%u(0x%x) dir=%d\n",
755 skdev->name, __func__, __LINE__,
756 req, lba, lba, count, count, data_dir);
757 } else {
758 if (!list_empty(&skdev->flush_list)) {
759 /* Process data part of FLUSH request. */
760 bio = (struct bio *)skd_flush_cmd_dequeue(skdev);
761 flush++;
762 pr_debug("%s:%s:%d processing FLUSH request with data.\n",
763 skdev->name, __func__, __LINE__);
764 } else {
765 /* peek at our bio queue */
766 bio = bio_list_peek(&skdev->bio_queue);
767 }
768
769 /* Are there any native requests to start? */
770 if (bio == NULL)
771 break;
772
773 lba = (u32)bio->bi_sector;
774 count = bio_sectors(bio);
775 data_dir = bio_data_dir(bio);
776 io_flags = bio->bi_rw;
777
778 pr_debug("%s:%s:%d new bio=%p lba=%u(0x%x) "
779 "count=%u(0x%x) dir=%d\n",
780 skdev->name, __func__, __LINE__,
781 bio, lba, lba, count, count, data_dir);
782
783 if (io_flags & REQ_FLUSH)
784 flush++;
785
786 if (io_flags & REQ_FUA)
787 fua++;
788 }
789
790 /* At this point we know there is a request
791 * (from our bio q or req q, depending on how the
792 * driver is built), so check for resources.
793 */
794
795 /* Are too many requests already in progress? */
796 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
797 pr_debug("%s:%s:%d qdepth %d, limit %d\n",
798 skdev->name, __func__, __LINE__,
799 skdev->in_flight, skdev->cur_max_queue_depth);
800 break;
801 }
802
803 /* Is a skd_request_context available? */
804 skreq = skdev->skreq_free_list;
805 if (skreq == NULL) {
806 pr_debug("%s:%s:%d Out of req=%p\n",
807 skdev->name, __func__, __LINE__, q);
808 break;
809 }
810 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
811 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
812
813 /* Now we check to see if we can get a fit msg */
814 if (skmsg == NULL) {
815 if (skdev->skmsg_free_list == NULL) {
816 pr_debug("%s:%s:%d Out of msg\n",
817 skdev->name, __func__, __LINE__);
818 break;
819 }
820 }
821
822 skreq->flush_cmd = 0;
823 skreq->n_sg = 0;
824 skreq->sg_byte_count = 0;
825 skreq->discard_page = 0;
826
827 /*
828 * OK to now dequeue request from either bio or q.
829 *
830 * At this point we are committed to either start or reject
831 * the native request. Note that skd_request_context is
832 * available but is still at the head of the free list.
833 */
834 if (!skd_bio) {
835 blk_start_request(req);
836 skreq->req = req;
837 skreq->fitmsg_id = 0;
838 } else {
839 if (unlikely(flush == SKD_FLUSH_DATA_SECOND)) {
840 skreq->bio = bio;
841 } else {
842 skreq->bio = bio_list_pop(&skdev->bio_queue);
843 SKD_ASSERT(skreq->bio == bio);
844 skreq->start_time = jiffies;
845 part_inc_in_flight(&skdev->disk->part0,
846 bio_data_dir(bio));
847 }
848
849 skreq->fitmsg_id = 0;
850 }
851
852 /* Either a FIT msg is in progress or we have to start one. */
853 if (skmsg == NULL) {
854 /* Are there any FIT msg buffers available? */
855 skmsg = skdev->skmsg_free_list;
856 if (skmsg == NULL) {
857 pr_debug("%s:%s:%d Out of msg skdev=%p\n",
858 skdev->name, __func__, __LINE__,
859 skdev);
860 break;
861 }
862 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
863 SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
864
865 skdev->skmsg_free_list = skmsg->next;
866
867 skmsg->state = SKD_MSG_STATE_BUSY;
868 skmsg->id += SKD_ID_INCR;
869
870 /* Initialize the FIT msg header */
871 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
872 memset(fmh, 0, sizeof(*fmh));
873 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
874 skmsg->length = sizeof(*fmh);
875 }
876
877 skreq->fitmsg_id = skmsg->id;
878
879 /*
880 * Note that a FIT msg may have just been started
881 * but contains no SoFIT requests yet.
882 */
883
884 /*
885 * Transcode the request, checking as we go. The outcome of
886 * the transcoding is represented by the error variable.
887 */
888 cmd_ptr = &skmsg->msg_buf[skmsg->length];
889 memset(cmd_ptr, 0, 32);
890
891 be_lba = cpu_to_be32(lba);
892 be_count = cpu_to_be32(count);
893 be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
894 cmdctxt = skreq->id + SKD_ID_INCR;
895
896 scsi_req = cmd_ptr;
897 scsi_req->hdr.tag = cmdctxt;
898 scsi_req->hdr.sg_list_dma_address = be_dmaa;
899
900 if (data_dir == READ)
901 skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
902 else
903 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
904
905 if (io_flags & REQ_DISCARD) {
906 page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
907 if (!page) {
908 pr_err("request_fn:Page allocation failed.\n");
909 skd_end_request(skdev, skreq, -ENOMEM);
910 break;
911 }
912 skreq->discard_page = 1;
913 skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
914
915 } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
916 skd_prep_zerosize_flush_cdb(scsi_req, skreq);
917 SKD_ASSERT(skreq->flush_cmd == 1);
918
919 } else {
920 skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
921 }
922
923 if (fua)
924 scsi_req->cdb[1] |= SKD_FUA_NV;
925
926 if ((!skd_bio && !req->bio) ||
927 (skd_bio && flush == SKD_FLUSH_ZERO_SIZE_FIRST))
928 goto skip_sg;
929
930 error = skd_preop_sg_list(skdev, skreq);
931
932 if (error != 0) {
933 /*
934 * Complete the native request with error.
935 * Note that the request context is still at the
936 * head of the free list, and that the SoFIT request
937 * was encoded into the FIT msg buffer but the FIT
938 * msg length has not been updated. In short, the
939 * only resource that has been allocated but might
940 * not be used is that the FIT msg could be empty.
941 */
942 pr_debug("%s:%s:%d error Out\n",
943 skdev->name, __func__, __LINE__);
944 skd_end_request(skdev, skreq, error);
945 continue;
946 }
947
948skip_sg:
949 scsi_req->hdr.sg_list_len_bytes =
950 cpu_to_be32(skreq->sg_byte_count);
951
952 /* Complete resource allocations. */
953 skdev->skreq_free_list = skreq->next;
954 skreq->state = SKD_REQ_STATE_BUSY;
955 skreq->id += SKD_ID_INCR;
956
957 skmsg->length += sizeof(struct skd_scsi_request);
958 fmh->num_protocol_cmds_coalesced++;
959
960 /*
961 * Update the active request counts.
962 * Capture the timeout timestamp.
963 */
964 skreq->timeout_stamp = skdev->timeout_stamp;
965 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
966 skdev->timeout_slot[timo_slot]++;
967 skdev->in_flight++;
968 pr_debug("%s:%s:%d req=0x%x busy=%d\n",
969 skdev->name, __func__, __LINE__,
970 skreq->id, skdev->in_flight);
971
972 /*
973 * If the FIT msg buffer is full send it.
974 */
975 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
976 fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
977 skd_send_fitmsg(skdev, skmsg);
978 skmsg = NULL;
979 fmh = NULL;
980 }
981 }
982
983 /*
984 * Is a FIT msg in progress? If it is empty put the buffer back
985 * on the free list. If it is non-empty send what we got.
986 * This minimizes latency when there are fewer requests than
987 * what fits in a FIT msg.
988 */
989 if (skmsg != NULL) {
990 /* Bigger than just a FIT msg header? */
991 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
992 pr_debug("%s:%s:%d sending msg=%p, len %d\n",
993 skdev->name, __func__, __LINE__,
994 skmsg, skmsg->length);
995 skd_send_fitmsg(skdev, skmsg);
996 } else {
997 /*
998 * The FIT msg is empty. It means we got started
999 * on the msg, but the requests were rejected.
1000 */
1001 skmsg->state = SKD_MSG_STATE_IDLE;
1002 skmsg->id += SKD_ID_INCR;
1003 skmsg->next = skdev->skmsg_free_list;
1004 skdev->skmsg_free_list = skmsg;
1005 }
1006 skmsg = NULL;
1007 fmh = NULL;
1008 }
1009
1010 /*
1011 * If req is non-NULL it means there is something to do but
1012 * we are out of a resource.
1013 */
1014 if (((!skd_bio) && req) ||
1015 ((skd_bio) && bio_list_peek(&skdev->bio_queue)))
1016 skd_stop_queue(skdev);
1017}
1018
1019static void skd_end_request_blk(struct skd_device *skdev,
1020 struct skd_request_context *skreq, int error)
1021{
1022 struct request *req = skreq->req;
1023 unsigned int io_flags = req->cmd_flags;
1024
1025 if ((io_flags & REQ_DISCARD) &&
1026 (skreq->discard_page == 1)) {
1027 pr_debug("%s:%s:%d skd_end_request_blk, free the page!",
1028 skdev->name, __func__, __LINE__);
1029 free_page((unsigned long)req->buffer);
1030 req->buffer = NULL;
1031 }
1032
1033 if (unlikely(error)) {
1034 struct request *req = skreq->req;
1035 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
1036 u32 lba = (u32)blk_rq_pos(req);
1037 u32 count = blk_rq_sectors(req);
1038
1039 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
1040 skd_name(skdev), cmd, lba, count, skreq->id);
1041 } else
1042 pr_debug("%s:%s:%d id=0x%x error=%d\n",
1043 skdev->name, __func__, __LINE__, skreq->id, error);
1044
1045 __blk_end_request_all(skreq->req, error);
1046}
1047
1048static int skd_preop_sg_list_blk(struct skd_device *skdev,
1049 struct skd_request_context *skreq)
1050{
1051 struct request *req = skreq->req;
1052 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
1053 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
1054 struct scatterlist *sg = &skreq->sg[0];
1055 int n_sg;
1056 int i;
1057
1058 skreq->sg_byte_count = 0;
1059
1060 /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
1061 skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
1062
1063 n_sg = blk_rq_map_sg(skdev->queue, req, sg);
1064 if (n_sg <= 0)
1065 return -EINVAL;
1066
1067 /*
1068 * Map scatterlist to PCI bus addresses.
1069 * Note PCI might change the number of entries.
1070 */
1071 n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
1072 if (n_sg <= 0)
1073 return -EINVAL;
1074
1075 SKD_ASSERT(n_sg <= skdev->sgs_per_request);
1076
1077 skreq->n_sg = n_sg;
1078
1079 for (i = 0; i < n_sg; i++) {
1080 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1081 u32 cnt = sg_dma_len(&sg[i]);
1082 uint64_t dma_addr = sg_dma_address(&sg[i]);
1083
1084 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
1085 sgd->byte_count = cnt;
1086 skreq->sg_byte_count += cnt;
1087 sgd->host_side_addr = dma_addr;
1088 sgd->dev_side_addr = 0;
1089 }
1090
1091 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
1092 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
1093
1094 if (unlikely(skdev->dbg_level > 1)) {
1095 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1096 skdev->name, __func__, __LINE__,
1097 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1098 for (i = 0; i < n_sg; i++) {
1099 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1100 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
1101 "addr=0x%llx next=0x%llx\n",
1102 skdev->name, __func__, __LINE__,
1103 i, sgd->byte_count, sgd->control,
1104 sgd->host_side_addr, sgd->next_desc_ptr);
1105 }
1106 }
1107
1108 return 0;
1109}
1110
1111static void skd_postop_sg_list_blk(struct skd_device *skdev,
1112 struct skd_request_context *skreq)
1113{
1114 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
1115 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
1116
1117 /*
1118 * restore the next ptr for next IO request so we
1119 * don't have to set it every time.
1120 */
1121 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
1122 skreq->sksg_dma_address +
1123 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
1124 pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
1125}
1126
1127static void skd_end_request_bio(struct skd_device *skdev,
1128 struct skd_request_context *skreq, int error)
1129{
1130 struct bio *bio = skreq->bio;
1131 int rw = bio_data_dir(bio);
1132 unsigned long io_flags = bio->bi_rw;
1133
1134 if ((io_flags & REQ_DISCARD) &&
1135 (skreq->discard_page == 1)) {
1136 pr_debug("%s:%s:%d biomode: skd_end_request: freeing DISCARD page.\n",
1137 skdev->name, __func__, __LINE__);
1138 free_page((unsigned long)page_address(bio->bi_io_vec->bv_page));
1139 }
1140
1141 if (unlikely(error)) {
1142 u32 lba = (u32)skreq->bio->bi_sector;
1143 u32 count = bio_sectors(skreq->bio);
1144 char *cmd = (rw == WRITE) ? "write" : "read";
1145 pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
1146 skd_name(skdev), cmd, lba, count, skreq->id);
1147 }
1148 {
1149 int cpu = part_stat_lock();
1150
1151 if (likely(!error)) {
1152 part_stat_inc(cpu, &skdev->disk->part0, ios[rw]);
1153 part_stat_add(cpu, &skdev->disk->part0, sectors[rw],
1154 bio_sectors(bio));
1155 }
1156 part_stat_add(cpu, &skdev->disk->part0, ticks[rw],
1157 jiffies - skreq->start_time);
1158 part_dec_in_flight(&skdev->disk->part0, rw);
1159 part_stat_unlock();
1160 }
1161
1162 pr_debug("%s:%s:%d id=0x%x error=%d\n",
1163 skdev->name, __func__, __LINE__, skreq->id, error);
1164
1165 bio_endio(skreq->bio, error);
1166}
1167
1168static int skd_preop_sg_list_bio(struct skd_device *skdev,
1169 struct skd_request_context *skreq)
1170{
1171 struct bio *bio = skreq->bio;
1172 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
1173 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
1174 int n_sg;
1175 int i;
1176 struct bio_vec *vec;
1177 struct fit_sg_descriptor *sgd;
1178 u64 dma_addr;
1179 u32 count;
1180 int errs = 0;
1181 unsigned int io_flags = 0;
1182 io_flags |= bio->bi_rw;
1183
1184 skreq->sg_byte_count = 0;
1185 n_sg = skreq->n_sg = skreq->bio->bi_vcnt;
1186
1187 if (n_sg <= 0)
1188 return -EINVAL;
1189
1190 if (n_sg > skdev->sgs_per_request) {
1191 pr_err("(%s): sg overflow n=%d\n",
1192 skd_name(skdev), n_sg);
1193 skreq->n_sg = 0;
1194 return -EIO;
1195 }
1196
1197 for (i = 0; i < skreq->n_sg; i++) {
1198 vec = bio_iovec_idx(bio, i);
1199 dma_addr = pci_map_page(skdev->pdev,
1200 vec->bv_page,
1201 vec->bv_offset, vec->bv_len, pci_dir);
1202 count = vec->bv_len;
1203
1204 if (count == 0 || count > 64u * 1024u || (count & 3) != 0
1205 || (dma_addr & 3) != 0) {
1206 pr_err(
1207 "(%s): Bad sg ix=%d count=%d addr=0x%llx\n",
1208 skd_name(skdev), i, count, dma_addr);
1209 errs++;
1210 }
1211
1212 sgd = &skreq->sksg_list[i];
1213
1214 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
1215 sgd->byte_count = vec->bv_len;
1216 skreq->sg_byte_count += vec->bv_len;
1217 sgd->host_side_addr = dma_addr;
1218 sgd->dev_side_addr = 0; /* not used */
1219 }
1220
1221 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
1222 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
1223
1224
1225 if (!(io_flags & REQ_DISCARD)) {
1226 count = bio_sectors(bio) << 9u;
1227 if (count != skreq->sg_byte_count) {
1228 pr_err("(%s): mismatch count sg=%d req=%d\n",
1229 skd_name(skdev), skreq->sg_byte_count, count);
1230 errs++;
1231 }
1232 }
1233
1234 if (unlikely(skdev->dbg_level > 1)) {
1235 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1236 skdev->name, __func__, __LINE__,
1237 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1238 for (i = 0; i < n_sg; i++) {
1239 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1240 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
1241 "addr=0x%llx next=0x%llx\n",
1242 skdev->name, __func__, __LINE__,
1243 i, sgd->byte_count, sgd->control,
1244 sgd->host_side_addr, sgd->next_desc_ptr);
1245 }
1246 }
1247
1248 if (errs != 0) {
1249 skd_postop_sg_list(skdev, skreq);
1250 skreq->n_sg = 0;
1251 return -EIO;
1252 }
1253
1254 return 0;
1255}
1256
1257static int skd_preop_sg_list(struct skd_device *skdev,
1258 struct skd_request_context *skreq)
1259{
1260 if (!skd_bio)
1261 return skd_preop_sg_list_blk(skdev, skreq);
1262 else
1263 return skd_preop_sg_list_bio(skdev, skreq);
1264}
1265
1266static void skd_postop_sg_list_bio(struct skd_device *skdev,
1267 struct skd_request_context *skreq)
1268{
1269 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
1270 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
1271 int i;
1272 struct fit_sg_descriptor *sgd;
1273
1274 /*
1275 * restore the next ptr for next IO request so we
1276 * don't have to set it every time.
1277 */
1278 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
1279 skreq->sksg_dma_address +
1280 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
1281
1282 for (i = 0; i < skreq->n_sg; i++) {
1283 sgd = &skreq->sksg_list[i];
1284 pci_unmap_page(skdev->pdev, sgd->host_side_addr,
1285 sgd->byte_count, pci_dir);
1286 }
1287}
1288
1289static void skd_postop_sg_list(struct skd_device *skdev,
1290 struct skd_request_context *skreq)
1291{
1292 if (!skd_bio)
1293 skd_postop_sg_list_blk(skdev, skreq);
1294 else
1295 skd_postop_sg_list_bio(skdev, skreq);
1296}
1297
1298static void skd_end_request(struct skd_device *skdev,
1299 struct skd_request_context *skreq, int error)
1300{
1301 if (likely(!skd_bio))
1302 skd_end_request_blk(skdev, skreq, error);
1303 else
1304 skd_end_request_bio(skdev, skreq, error);
1305}
1306
1307static void skd_request_fn_not_online(struct request_queue *q)
1308{
1309 struct skd_device *skdev = q->queuedata;
1310 int error;
1311
1312 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
1313
1314 skd_log_skdev(skdev, "req_not_online");
1315 switch (skdev->state) {
1316 case SKD_DRVR_STATE_PAUSING:
1317 case SKD_DRVR_STATE_PAUSED:
1318 case SKD_DRVR_STATE_STARTING:
1319 case SKD_DRVR_STATE_RESTARTING:
1320 case SKD_DRVR_STATE_WAIT_BOOT:
1321 /* In case of starting, we haven't started the queue,
1322 * so we can't get here... but requests are
1323 * possibly hanging out waiting for us because we
1324 * reported the dev/skd0 already. They'll wait
1325 * forever if connect doesn't complete.
1326 * What to do??? delay dev/skd0 ??
1327 */
1328 case SKD_DRVR_STATE_BUSY:
1329 case SKD_DRVR_STATE_BUSY_IMMINENT:
1330 case SKD_DRVR_STATE_BUSY_ERASE:
1331 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1332 return;
1333
1334 case SKD_DRVR_STATE_BUSY_SANITIZE:
1335 case SKD_DRVR_STATE_STOPPING:
1336 case SKD_DRVR_STATE_SYNCING:
1337 case SKD_DRVR_STATE_FAULT:
1338 case SKD_DRVR_STATE_DISAPPEARED:
1339 default:
1340 error = -EIO;
1341 break;
1342 }
1343
1344 /* If we get here, terminate all pending block requests
1345 * with EIO and any scsi pass thru with appropriate sense
1346 */
1347
1348 skd_fail_all_pending(skdev);
1349}
1350
1351/*
1352 *****************************************************************************
1353 * TIMER
1354 *****************************************************************************
1355 */
1356
1357static void skd_timer_tick_not_online(struct skd_device *skdev);
1358
1359static void skd_timer_tick(ulong arg)
1360{
1361 struct skd_device *skdev = (struct skd_device *)arg;
1362
1363 u32 timo_slot;
1364 u32 overdue_timestamp;
1365 unsigned long reqflags;
1366 u32 state;
1367
1368 if (skdev->state == SKD_DRVR_STATE_FAULT)
1369 /* The driver has declared fault, and we want it to
1370 * stay that way until driver is reloaded.
1371 */
1372 return;
1373
1374 spin_lock_irqsave(&skdev->lock, reqflags);
1375
1376 state = SKD_READL(skdev, FIT_STATUS);
1377 state &= FIT_SR_DRIVE_STATE_MASK;
1378 if (state != skdev->drive_state)
1379 skd_isr_fwstate(skdev);
1380
1381 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
1382 skd_timer_tick_not_online(skdev);
1383 goto timer_func_out;
1384 }
1385 skdev->timeout_stamp++;
1386 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
1387
1388 /*
1389 * All requests that happened during the previous use of
1390 * this slot should be done by now. The previous use was
1391 * over 7 seconds ago.
1392 */
1393 if (skdev->timeout_slot[timo_slot] == 0)
1394 goto timer_func_out;
1395
1396 /* Something is overdue */
1397 overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
1398
1399 pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
1400 skdev->name, __func__, __LINE__,
1401 skdev->timeout_slot[timo_slot], skdev->in_flight);
1402 pr_err("(%s): Overdue IOs (%d), busy %d\n",
1403 skd_name(skdev), skdev->timeout_slot[timo_slot],
1404 skdev->in_flight);
1405
1406 skdev->timer_countdown = SKD_DRAINING_TIMO;
1407 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1408 skdev->timo_slot = timo_slot;
1409 skd_stop_queue(skdev);
1410
1411timer_func_out:
1412 mod_timer(&skdev->timer, (jiffies + HZ));
1413
1414 spin_unlock_irqrestore(&skdev->lock, reqflags);
1415}
1416
1417static void skd_timer_tick_not_online(struct skd_device *skdev)
1418{
1419 switch (skdev->state) {
1420 case SKD_DRVR_STATE_IDLE:
1421 case SKD_DRVR_STATE_LOAD:
1422 break;
1423 case SKD_DRVR_STATE_BUSY_SANITIZE:
1424 pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
1425 skdev->name, __func__, __LINE__,
1426 skdev->drive_state, skdev->state);
1427 /* If we've been in sanitize for 3 seconds, we figure we're not
1428 * going to get any more completions, so recover requests now
1429 */
1430 if (skdev->timer_countdown > 0) {
1431 skdev->timer_countdown--;
1432 return;
1433 }
1434 skd_recover_requests(skdev, 0);
1435 break;
1436
1437 case SKD_DRVR_STATE_BUSY:
1438 case SKD_DRVR_STATE_BUSY_IMMINENT:
1439 case SKD_DRVR_STATE_BUSY_ERASE:
1440 pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
1441 skdev->name, __func__, __LINE__,
1442 skdev->state, skdev->timer_countdown);
1443 if (skdev->timer_countdown > 0) {
1444 skdev->timer_countdown--;
1445 return;
1446 }
1447 pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
1448 skdev->name, __func__, __LINE__,
1449 skdev->state, skdev->timer_countdown);
1450 skd_restart_device(skdev);
1451 break;
1452
1453 case SKD_DRVR_STATE_WAIT_BOOT:
1454 case SKD_DRVR_STATE_STARTING:
1455 if (skdev->timer_countdown > 0) {
1456 skdev->timer_countdown--;
1457 return;
1458 }
1459 /* For now, we fault the drive. Could attempt resets to
1460 * recover at some point. */
1461 skdev->state = SKD_DRVR_STATE_FAULT;
1462
1463 pr_err("(%s): DriveFault Connect Timeout (%x)\n",
1464 skd_name(skdev), skdev->drive_state);
1465
1466 /*start the queue so we can respond with error to requests */
1467 /* wakeup anyone waiting for startup complete */
1468 skd_start_queue(skdev);
1469 skdev->gendisk_on = -1;
1470 wake_up_interruptible(&skdev->waitq);
1471 break;
1472
1473 case SKD_DRVR_STATE_ONLINE:
1474 /* shouldn't get here. */
1475 break;
1476
1477 case SKD_DRVR_STATE_PAUSING:
1478 case SKD_DRVR_STATE_PAUSED:
1479 break;
1480
1481 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1482 pr_debug("%s:%s:%d "
1483 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1484 skdev->name, __func__, __LINE__,
1485 skdev->timo_slot,
1486 skdev->timer_countdown,
1487 skdev->in_flight,
1488 skdev->timeout_slot[skdev->timo_slot]);
1489 /* if the slot has cleared we can let the I/O continue */
1490 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1491 pr_debug("%s:%s:%d Slot drained, starting queue.\n",
1492 skdev->name, __func__, __LINE__);
1493 skdev->state = SKD_DRVR_STATE_ONLINE;
1494 skd_start_queue(skdev);
1495 return;
1496 }
1497 if (skdev->timer_countdown > 0) {
1498 skdev->timer_countdown--;
1499 return;
1500 }
1501 skd_restart_device(skdev);
1502 break;
1503
1504 case SKD_DRVR_STATE_RESTARTING:
1505 if (skdev->timer_countdown > 0) {
1506 skdev->timer_countdown--;
1507 return;
1508 }
1509 /* For now, we fault the drive. Could attempt resets to
1510 * recover at some point. */
1511 skdev->state = SKD_DRVR_STATE_FAULT;
1512 pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
1513 skd_name(skdev), skdev->drive_state);
1514
1515 /*
1516 * Recovering does two things:
1517 * 1. completes IO with error
1518 * 2. reclaims dma resources
1519 * When is it safe to recover requests?
1520 * - if the drive state is faulted
1521 * - if the state is still soft reset after our timeout
1522 * - if the drive registers are dead (state = FF)
1523 * If it is "unsafe", we still need to recover, so we will
1524 * disable pci bus mastering and disable our interrupts.
1525 */
1526
1527 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1528 (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1529 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1530 /* It never came out of soft reset. Try to
1531 * recover the requests and then let them
1532 * fail. This is to mitigate hung processes. */
1533 skd_recover_requests(skdev, 0);
1534 else {
1535 pr_err("(%s): Disable BusMaster (%x)\n",
1536 skd_name(skdev), skdev->drive_state);
1537 pci_disable_device(skdev->pdev);
1538 skd_disable_interrupts(skdev);
1539 skd_recover_requests(skdev, 0);
1540 }
1541
1542 /*start the queue so we can respond with error to requests */
1543 /* wakeup anyone waiting for startup complete */
1544 skd_start_queue(skdev);
1545 skdev->gendisk_on = -1;
1546 wake_up_interruptible(&skdev->waitq);
1547 break;
1548
1549 case SKD_DRVR_STATE_RESUMING:
1550 case SKD_DRVR_STATE_STOPPING:
1551 case SKD_DRVR_STATE_SYNCING:
1552 case SKD_DRVR_STATE_FAULT:
1553 case SKD_DRVR_STATE_DISAPPEARED:
1554 default:
1555 break;
1556 }
1557}
1558
1559static int skd_start_timer(struct skd_device *skdev)
1560{
1561 int rc;
1562
1563 init_timer(&skdev->timer);
1564 setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1565
1566 rc = mod_timer(&skdev->timer, (jiffies + HZ));
1567 if (rc)
1568 pr_err("%s: failed to start timer %d\n",
1569 __func__, rc);
1570 return rc;
1571}
1572
1573static void skd_kill_timer(struct skd_device *skdev)
1574{
1575 del_timer_sync(&skdev->timer);
1576}
1577
1578/*
1579 *****************************************************************************
1580 * IOCTL
1581 *****************************************************************************
1582 */
1583static int skd_ioctl_sg_io(struct skd_device *skdev,
1584 fmode_t mode, void __user *argp);
1585static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1586 struct skd_sg_io *sksgio);
1587static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1588 struct skd_sg_io *sksgio);
1589static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1590 struct skd_sg_io *sksgio);
1591static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1592 struct skd_sg_io *sksgio, int dxfer_dir);
1593static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1594 struct skd_sg_io *sksgio);
1595static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1596static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1597 struct skd_sg_io *sksgio);
1598static int skd_sg_io_put_status(struct skd_device *skdev,
1599 struct skd_sg_io *sksgio);
1600
1601static void skd_complete_special(struct skd_device *skdev,
1602 volatile struct fit_completion_entry_v1
1603 *skcomp,
1604 volatile struct fit_comp_error_info *skerr,
1605 struct skd_special_context *skspcl);
1606
1607static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1608 uint cmd_in, ulong arg)
1609{
1610 int rc = 0;
1611 struct gendisk *disk = bdev->bd_disk;
1612 struct skd_device *skdev = disk->private_data;
1613 void __user *p = (void *)arg;
1614
1615 pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
1616 skdev->name, __func__, __LINE__,
1617 disk->disk_name, current->comm, mode, cmd_in, arg);
1618
1619 if (!capable(CAP_SYS_ADMIN))
1620 return -EPERM;
1621
1622 switch (cmd_in) {
1623 case SG_SET_TIMEOUT:
1624 case SG_GET_TIMEOUT:
1625 case SG_GET_VERSION_NUM:
1626 rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
1627 break;
1628 case SG_IO:
1629 rc = skd_ioctl_sg_io(skdev, mode, p);
1630 break;
1631
1632 default:
1633 rc = -ENOTTY;
1634 break;
1635 }
1636
1637 pr_debug("%s:%s:%d %s: completion rc %d\n",
1638 skdev->name, __func__, __LINE__, disk->disk_name, rc);
1639 return rc;
1640}
1641
1642static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1643 void __user *argp)
1644{
1645 int rc;
1646 struct skd_sg_io sksgio;
1647
1648 memset(&sksgio, 0, sizeof(sksgio));
1649 sksgio.mode = mode;
1650 sksgio.argp = argp;
1651 sksgio.iov = &sksgio.no_iov_iov;
1652
1653 switch (skdev->state) {
1654 case SKD_DRVR_STATE_ONLINE:
1655 case SKD_DRVR_STATE_BUSY_IMMINENT:
1656 break;
1657
1658 default:
1659 pr_debug("%s:%s:%d drive not online\n",
1660 skdev->name, __func__, __LINE__);
1661 rc = -ENXIO;
1662 goto out;
1663 }
1664
1665 rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1666 if (rc)
1667 goto out;
1668
1669 rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1670 if (rc)
1671 goto out;
1672
1673 rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1674 if (rc)
1675 goto out;
1676
1677 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1678 if (rc)
1679 goto out;
1680
1681 rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1682 if (rc)
1683 goto out;
1684
1685 rc = skd_sg_io_await(skdev, &sksgio);
1686 if (rc)
1687 goto out;
1688
1689 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1690 if (rc)
1691 goto out;
1692
1693 rc = skd_sg_io_put_status(skdev, &sksgio);
1694 if (rc)
1695 goto out;
1696
1697 rc = 0;
1698
1699out:
1700 skd_sg_io_release_skspcl(skdev, &sksgio);
1701
1702 if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1703 kfree(sksgio.iov);
1704 return rc;
1705}
1706
1707static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1708 struct skd_sg_io *sksgio)
1709{
1710 struct sg_io_hdr *sgp = &sksgio->sg;
1711 int i, acc;
1712
1713 if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
1714 pr_debug("%s:%s:%d access sg failed %p\n",
1715 skdev->name, __func__, __LINE__, sksgio->argp);
1716 return -EFAULT;
1717 }
1718
1719 if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
1720 pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
1721 skdev->name, __func__, __LINE__, sksgio->argp);
1722 return -EFAULT;
1723 }
1724
1725 if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
1726 pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
1727 skdev->name, __func__, __LINE__, sgp->interface_id);
1728 return -EINVAL;
1729 }
1730
1731 if (sgp->cmd_len > sizeof(sksgio->cdb)) {
1732 pr_debug("%s:%s:%d cmd_len invalid %d\n",
1733 skdev->name, __func__, __LINE__, sgp->cmd_len);
1734 return -EINVAL;
1735 }
1736
1737 if (sgp->iovec_count > 256) {
1738 pr_debug("%s:%s:%d iovec_count invalid %d\n",
1739 skdev->name, __func__, __LINE__, sgp->iovec_count);
1740 return -EINVAL;
1741 }
1742
1743 if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
1744 pr_debug("%s:%s:%d dxfer_len invalid %d\n",
1745 skdev->name, __func__, __LINE__, sgp->dxfer_len);
1746 return -EINVAL;
1747 }
1748
1749 switch (sgp->dxfer_direction) {
1750 case SG_DXFER_NONE:
1751 acc = -1;
1752 break;
1753
1754 case SG_DXFER_TO_DEV:
1755 acc = VERIFY_READ;
1756 break;
1757
1758 case SG_DXFER_FROM_DEV:
1759 case SG_DXFER_TO_FROM_DEV:
1760 acc = VERIFY_WRITE;
1761 break;
1762
1763 default:
1764 pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
1765 skdev->name, __func__, __LINE__, sgp->dxfer_direction);
1766 return -EINVAL;
1767 }
1768
1769 if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
1770 pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
1771 skdev->name, __func__, __LINE__, sgp->cmdp);
1772 return -EFAULT;
1773 }
1774
1775 if (sgp->mx_sb_len != 0) {
1776 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
1777 pr_debug("%s:%s:%d access sbp failed %p\n",
1778 skdev->name, __func__, __LINE__, sgp->sbp);
1779 return -EFAULT;
1780 }
1781 }
1782
1783 if (sgp->iovec_count == 0) {
1784 sksgio->iov[0].iov_base = sgp->dxferp;
1785 sksgio->iov[0].iov_len = sgp->dxfer_len;
1786 sksgio->iovcnt = 1;
1787 sksgio->dxfer_len = sgp->dxfer_len;
1788 } else {
1789 struct sg_iovec *iov;
1790 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1791 size_t iov_data_len;
1792
1793 iov = kmalloc(nbytes, GFP_KERNEL);
1794 if (iov == NULL) {
1795 pr_debug("%s:%s:%d alloc iovec failed %d\n",
1796 skdev->name, __func__, __LINE__,
1797 sgp->iovec_count);
1798 return -ENOMEM;
1799 }
1800 sksgio->iov = iov;
1801 sksgio->iovcnt = sgp->iovec_count;
1802
1803 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
1804 pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
1805 skdev->name, __func__, __LINE__, sgp->dxferp);
1806 return -EFAULT;
1807 }
1808
1809 /*
1810 * Sum up the vecs, making sure they don't overflow
1811 */
1812 iov_data_len = 0;
1813 for (i = 0; i < sgp->iovec_count; i++) {
1814 if (iov_data_len + iov[i].iov_len < iov_data_len)
1815 return -EINVAL;
1816 iov_data_len += iov[i].iov_len;
1817 }
1818
1819 /* SG_IO howto says that the shorter of the two wins */
1820 if (sgp->dxfer_len < iov_data_len) {
1821 sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1822 sgp->iovec_count,
1823 sgp->dxfer_len);
1824 sksgio->dxfer_len = sgp->dxfer_len;
1825 } else
1826 sksgio->dxfer_len = iov_data_len;
1827 }
1828
1829 if (sgp->dxfer_direction != SG_DXFER_NONE) {
1830 struct sg_iovec *iov = sksgio->iov;
1831 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1832 if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
1833 pr_debug("%s:%s:%d access data failed %p/%d\n",
1834 skdev->name, __func__, __LINE__,
1835 iov->iov_base, (int)iov->iov_len);
1836 return -EFAULT;
1837 }
1838 }
1839 }
1840
1841 return 0;
1842}
1843
1844static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1845 struct skd_sg_io *sksgio)
1846{
1847 struct skd_special_context *skspcl = NULL;
1848 int rc;
1849
1850 for (;; ) {
1851 ulong flags;
1852
1853 spin_lock_irqsave(&skdev->lock, flags);
1854 skspcl = skdev->skspcl_free_list;
1855 if (skspcl != NULL) {
1856 skdev->skspcl_free_list =
1857 (struct skd_special_context *)skspcl->req.next;
1858 skspcl->req.id += SKD_ID_INCR;
1859 skspcl->req.state = SKD_REQ_STATE_SETUP;
1860 skspcl->orphaned = 0;
1861 skspcl->req.n_sg = 0;
1862 }
1863 spin_unlock_irqrestore(&skdev->lock, flags);
1864
1865 if (skspcl != NULL) {
1866 rc = 0;
1867 break;
1868 }
1869
1870 pr_debug("%s:%s:%d blocking\n",
1871 skdev->name, __func__, __LINE__);
1872
1873 rc = wait_event_interruptible_timeout(
1874 skdev->waitq,
1875 (skdev->skspcl_free_list != NULL),
1876 msecs_to_jiffies(sksgio->sg.timeout));
1877
1878 pr_debug("%s:%s:%d unblocking, rc=%d\n",
1879 skdev->name, __func__, __LINE__, rc);
1880
1881 if (rc <= 0) {
1882 if (rc == 0)
1883 rc = -ETIMEDOUT;
1884 else
1885 rc = -EINTR;
1886 break;
1887 }
1888 /*
1889 * If we get here rc > 0 meaning the timeout to
1890 * wait_event_interruptible_timeout() had time left, hence the
1891 * sought event -- non-empty free list -- happened.
1892 * Retry the allocation.
1893 */
1894 }
1895 sksgio->skspcl = skspcl;
1896
1897 return rc;
1898}
1899
1900static int skd_skreq_prep_buffering(struct skd_device *skdev,
1901 struct skd_request_context *skreq,
1902 u32 dxfer_len)
1903{
1904 u32 resid = dxfer_len;
1905
1906 /*
1907 * The DMA engine must have aligned addresses and byte counts.
1908 */
1909 resid += (-resid) & 3;
1910 skreq->sg_byte_count = resid;
1911
1912 skreq->n_sg = 0;
1913
1914 while (resid > 0) {
1915 u32 nbytes = PAGE_SIZE;
1916 u32 ix = skreq->n_sg;
1917 struct scatterlist *sg = &skreq->sg[ix];
1918 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1919 struct page *page;
1920
1921 if (nbytes > resid)
1922 nbytes = resid;
1923
1924 page = alloc_page(GFP_KERNEL);
1925 if (page == NULL)
1926 return -ENOMEM;
1927
1928 sg_set_page(sg, page, nbytes, 0);
1929
1930 /* TODO: This should be going through a pci_???()
1931 * routine to do proper mapping. */
1932 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1933 sksg->byte_count = nbytes;
1934
1935 sksg->host_side_addr = sg_phys(sg);
1936
1937 sksg->dev_side_addr = 0;
1938 sksg->next_desc_ptr = skreq->sksg_dma_address +
1939 (ix + 1) * sizeof(*sksg);
1940
1941 skreq->n_sg++;
1942 resid -= nbytes;
1943 }
1944
1945 if (skreq->n_sg > 0) {
1946 u32 ix = skreq->n_sg - 1;
1947 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1948
1949 sksg->control = FIT_SGD_CONTROL_LAST;
1950 sksg->next_desc_ptr = 0;
1951 }
1952
1953 if (unlikely(skdev->dbg_level > 1)) {
1954 u32 i;
1955
1956 pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
1957 skdev->name, __func__, __LINE__,
1958 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
1959 for (i = 0; i < skreq->n_sg; i++) {
1960 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1961
1962 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
1963 "addr=0x%llx next=0x%llx\n",
1964 skdev->name, __func__, __LINE__,
1965 i, sgd->byte_count, sgd->control,
1966 sgd->host_side_addr, sgd->next_desc_ptr);
1967 }
1968 }
1969
1970 return 0;
1971}
1972
1973static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1974 struct skd_sg_io *sksgio)
1975{
1976 struct skd_special_context *skspcl = sksgio->skspcl;
1977 struct skd_request_context *skreq = &skspcl->req;
1978 u32 dxfer_len = sksgio->dxfer_len;
1979 int rc;
1980
1981 rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1982 /*
1983 * Eventually, errors or not, skd_release_special() is called
1984 * to recover allocations including partial allocations.
1985 */
1986 return rc;
1987}
1988
1989static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1990 struct skd_sg_io *sksgio, int dxfer_dir)
1991{
1992 struct skd_special_context *skspcl = sksgio->skspcl;
1993 u32 iov_ix = 0;
1994 struct sg_iovec curiov;
1995 u32 sksg_ix = 0;
1996 u8 *bufp = NULL;
1997 u32 buf_len = 0;
1998 u32 resid = sksgio->dxfer_len;
1999 int rc;
2000
2001 curiov.iov_len = 0;
2002 curiov.iov_base = NULL;
2003
2004 if (dxfer_dir != sksgio->sg.dxfer_direction) {
2005 if (dxfer_dir != SG_DXFER_TO_DEV ||
2006 sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
2007 return 0;
2008 }
2009
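	/*
	 * Walk the user iovecs and the driver's bounce pages in parallel,
	 * copying per pass at most a page, the bytes left in the current
	 * iovec, or the bytes left in the transfer, whichever is smallest.
	 */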
2010 while (resid > 0) {
2011 u32 nbytes = PAGE_SIZE;
2012
2013 if (curiov.iov_len == 0) {
2014 curiov = sksgio->iov[iov_ix++];
2015 continue;
2016 }
2017
2018 if (buf_len == 0) {
2019 struct page *page;
2020 page = sg_page(&skspcl->req.sg[sksg_ix++]);
2021 bufp = page_address(page);
2022 buf_len = PAGE_SIZE;
2023 }
2024
2025 nbytes = min_t(u32, nbytes, resid);
2026 nbytes = min_t(u32, nbytes, curiov.iov_len);
2027 nbytes = min_t(u32, nbytes, buf_len);
2028
2029 if (dxfer_dir == SG_DXFER_TO_DEV)
2030 rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
2031 else
2032 rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
2033
2034 if (rc)
2035 return -EFAULT;
2036
2037 resid -= nbytes;
2038 curiov.iov_len -= nbytes;
2039 curiov.iov_base += nbytes;
2040 buf_len -= nbytes;
2041 }
2042
2043 return 0;
2044}
2045
2046static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
2047 struct skd_sg_io *sksgio)
2048{
2049 struct skd_special_context *skspcl = sksgio->skspcl;
2050 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2051 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2052
2053 memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
2054
2055 /* Initialize the FIT msg header */
2056 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
2057 fmh->num_protocol_cmds_coalesced = 1;
2058
2059 /* Initialize the SCSI request */
2060 if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
2061 scsi_req->hdr.sg_list_dma_address =
2062 cpu_to_be64(skspcl->req.sksg_dma_address);
2063 scsi_req->hdr.tag = skspcl->req.id;
2064 scsi_req->hdr.sg_list_len_bytes =
2065 cpu_to_be32(skspcl->req.sg_byte_count);
2066 memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
2067
2068 skspcl->req.state = SKD_REQ_STATE_BUSY;
2069 skd_send_special_fitmsg(skdev, skspcl);
2070
2071 return 0;
2072}
2073
2074static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
2075{
2076 unsigned long flags;
2077 int rc;
2078
2079 rc = wait_event_interruptible_timeout(skdev->waitq,
2080 (sksgio->skspcl->req.state !=
2081 SKD_REQ_STATE_BUSY),
2082 msecs_to_jiffies(
2083 sksgio->sg.timeout));
2084
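	/* rc > 0: request completed in time; rc == 0: timed out; rc < 0: interrupted */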
2085 spin_lock_irqsave(&skdev->lock, flags);
2086
2087 if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
2088 pr_debug("%s:%s:%d skspcl %p aborted\n",
2089 skdev->name, __func__, __LINE__, sksgio->skspcl);
2090
2091 /* Build check cond, sense and let command finish. */
2092 /* For a timeout, we must fabricate completion and sense
2093 * data to complete the command */
2094 sksgio->skspcl->req.completion.status =
2095 SAM_STAT_CHECK_CONDITION;
2096
2097 memset(&sksgio->skspcl->req.err_info, 0,
2098 sizeof(sksgio->skspcl->req.err_info));
2099 sksgio->skspcl->req.err_info.type = 0x70;
2100 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
2101 sksgio->skspcl->req.err_info.code = 0x44;
2102 sksgio->skspcl->req.err_info.qual = 0;
2103 rc = 0;
2104 } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
2105 /* No longer on the adapter. We finish. */
2106 rc = 0;
2107 else {
2108 /* Something's gone wrong. Still busy. Timeout or
2109 * user interrupted (control-C). Mark as an orphan
2110 * so it will be disposed when completed. */
2111 sksgio->skspcl->orphaned = 1;
2112 sksgio->skspcl = NULL;
2113 if (rc == 0) {
2114 pr_debug("%s:%s:%d timed out %p (%u ms)\n",
2115 skdev->name, __func__, __LINE__,
2116 sksgio, sksgio->sg.timeout);
2117 rc = -ETIMEDOUT;
2118 } else {
2119 pr_debug("%s:%s:%d cntlc %p\n",
2120 skdev->name, __func__, __LINE__, sksgio);
2121 rc = -EINTR;
2122 }
2123 }
2124
2125 spin_unlock_irqrestore(&skdev->lock, flags);
2126
2127 return rc;
2128}
2129
2130static int skd_sg_io_put_status(struct skd_device *skdev,
2131 struct skd_sg_io *sksgio)
2132{
2133 struct sg_io_hdr *sgp = &sksgio->sg;
2134 struct skd_special_context *skspcl = sksgio->skspcl;
2135 int resid = 0;
2136
2137 u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
2138
2139 sgp->status = skspcl->req.completion.status;
2140 resid = sksgio->dxfer_len - nb;
2141
2142 sgp->masked_status = sgp->status & STATUS_MASK;
2143 sgp->msg_status = 0;
2144 sgp->host_status = 0;
2145 sgp->driver_status = 0;
2146 sgp->resid = resid;
2147 if (sgp->masked_status || sgp->host_status || sgp->driver_status)
2148 sgp->info |= SG_INFO_CHECK;
2149
2150 pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
2151 skdev->name, __func__, __LINE__,
2152 sgp->status, sgp->masked_status, sgp->resid);
2153
2154 if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
2155 if (sgp->mx_sb_len > 0) {
2156 struct fit_comp_error_info *ei = &skspcl->req.err_info;
2157 u32 nbytes = sizeof(*ei);
2158
2159 nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
2160
2161 sgp->sb_len_wr = nbytes;
2162
2163 if (__copy_to_user(sgp->sbp, ei, nbytes)) {
2164 pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
2165 skdev->name, __func__, __LINE__,
2166 sgp->sbp);
2167 return -EFAULT;
2168 }
2169 }
2170 }
2171
2172 if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
2173 pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
2174 skdev->name, __func__, __LINE__, sksgio->argp);
2175 return -EFAULT;
2176 }
2177
2178 return 0;
2179}
2180
2181static int skd_sg_io_release_skspcl(struct skd_device *skdev,
2182 struct skd_sg_io *sksgio)
2183{
2184 struct skd_special_context *skspcl = sksgio->skspcl;
2185
2186 if (skspcl != NULL) {
2187 ulong flags;
2188
2189 sksgio->skspcl = NULL;
2190
2191 spin_lock_irqsave(&skdev->lock, flags);
2192 skd_release_special(skdev, skspcl);
2193 spin_unlock_irqrestore(&skdev->lock, flags);
2194 }
2195
2196 return 0;
2197}
2198
2199/*
2200 *****************************************************************************
2201 * INTERNAL REQUESTS -- generated by driver itself
2202 *****************************************************************************
2203 */
2204
2205static int skd_format_internal_skspcl(struct skd_device *skdev)
2206{
2207 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2208 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
2209 struct fit_msg_hdr *fmh;
2210 uint64_t dma_address;
2211 struct skd_scsi_request *scsi;
2212
2213 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
2214 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
2215 fmh->num_protocol_cmds_coalesced = 1;
2216
2217 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
2218 memset(scsi, 0, sizeof(*scsi));
2219 dma_address = skspcl->req.sksg_dma_address;
2220 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
2221 sgd->control = FIT_SGD_CONTROL_LAST;
2222 sgd->byte_count = 0;
2223 sgd->host_side_addr = skspcl->db_dma_address;
2224 sgd->dev_side_addr = 0;
2225 sgd->next_desc_ptr = 0LL;
2226
2227 return 1;
2228}
2229
2230#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
2231
2232static void skd_send_internal_skspcl(struct skd_device *skdev,
2233 struct skd_special_context *skspcl,
2234 u8 opcode)
2235{
2236 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
2237 struct skd_scsi_request *scsi;
2238 unsigned char *buf = skspcl->data_buf;
2239 int i;
2240
2241 if (skspcl->req.state != SKD_REQ_STATE_IDLE)
2242 /*
2243 * A refresh is already in progress.
2244 * Just wait for it to finish.
2245 */
2246 return;
2247
2248 SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
2249 skspcl->req.state = SKD_REQ_STATE_BUSY;
2250 skspcl->req.id += SKD_ID_INCR;
2251
2252 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
2253 scsi->hdr.tag = skspcl->req.id;
2254
2255 memset(scsi->cdb, 0, sizeof(scsi->cdb));
2256
2257 switch (opcode) {
2258 case TEST_UNIT_READY:
2259 scsi->cdb[0] = TEST_UNIT_READY;
2260 sgd->byte_count = 0;
2261 scsi->hdr.sg_list_len_bytes = 0;
2262 break;
2263
2264 case READ_CAPACITY:
2265 scsi->cdb[0] = READ_CAPACITY;
2266 sgd->byte_count = SKD_N_READ_CAP_BYTES;
2267 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
2268 break;
2269
2270 case INQUIRY:
2271 scsi->cdb[0] = INQUIRY;
2272 scsi->cdb[1] = 0x01; /* evpd */
2273 scsi->cdb[2] = 0x80; /* serial number page */
2274 scsi->cdb[4] = 0x10;
2275 sgd->byte_count = 16;
2276 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
2277 break;
2278
2279 case SYNCHRONIZE_CACHE:
2280 scsi->cdb[0] = SYNCHRONIZE_CACHE;
2281 sgd->byte_count = 0;
2282 scsi->hdr.sg_list_len_bytes = 0;
2283 break;
2284
2285 case WRITE_BUFFER:
2286 scsi->cdb[0] = WRITE_BUFFER;
2287 scsi->cdb[1] = 0x02;
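		/* low two bytes of the big-endian buffer length go in cdb[7..8] */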
2288 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
2289 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
2290 sgd->byte_count = WR_BUF_SIZE;
2291 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
2292 /* fill incrementing byte pattern */
2293 for (i = 0; i < sgd->byte_count; i++)
2294 buf[i] = i & 0xFF;
2295 break;
2296
2297 case READ_BUFFER:
2298 scsi->cdb[0] = READ_BUFFER;
2299 scsi->cdb[1] = 0x02;
2300 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
2301 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
2302 sgd->byte_count = WR_BUF_SIZE;
2303 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
2304 memset(skspcl->data_buf, 0, sgd->byte_count);
2305 break;
2306
2307 default:
2308 SKD_ASSERT("Don't know what to send");
2309 return;
2310
2311 }
2312 skd_send_special_fitmsg(skdev, skspcl);
2313}
2314
2315static void skd_refresh_device_data(struct skd_device *skdev)
2316{
2317 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2318
2319 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
2320}
2321
2322static int skd_chk_read_buf(struct skd_device *skdev,
2323 struct skd_special_context *skspcl)
2324{
2325 unsigned char *buf = skspcl->data_buf;
2326 int i;
2327
2328 /* check for incrementing byte pattern */
2329 for (i = 0; i < WR_BUF_SIZE; i++)
2330 if (buf[i] != (i & 0xFF))
2331 return 1;
2332
2333 return 0;
2334}
2335
2336static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
2337 u8 code, u8 qual, u8 fruc)
2338{
2339 /* If the check condition is of special interest, log a message */
2340 if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
2341 && (code == 0x04) && (qual == 0x06)) {
2342 pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
2343 "ascq/fruc %02x/%02x/%02x/%02x\n",
2344 skd_name(skdev), key, code, qual, fruc);
2345 }
2346}
2347
2348static void skd_complete_internal(struct skd_device *skdev,
2349 volatile struct fit_completion_entry_v1
2350 *skcomp,
2351 volatile struct fit_comp_error_info *skerr,
2352 struct skd_special_context *skspcl)
2353{
2354 u8 *buf = skspcl->data_buf;
2355 u8 status;
2356 int i;
2357 struct skd_scsi_request *scsi =
2358 (struct skd_scsi_request *)&skspcl->msg_buf[64];
2359
2360 SKD_ASSERT(skspcl == &skdev->internal_skspcl);
2361
2362 pr_debug("%s:%s:%d complete internal %x\n",
2363 skdev->name, __func__, __LINE__, scsi->cdb[0]);
2364
2365 skspcl->req.completion = *skcomp;
2366 skspcl->req.state = SKD_REQ_STATE_IDLE;
2367 skspcl->req.id += SKD_ID_INCR;
2368
2369 status = skspcl->req.completion.status;
2370
2371 skd_log_check_status(skdev, status, skerr->key, skerr->code,
2372 skerr->qual, skerr->fruc);
2373
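	/*
	 * Internal commands run as a chain: TEST_UNIT_READY, then
	 * WRITE_BUFFER/READ_BUFFER (pattern check), then READ_CAPACITY,
	 * then INQUIRY, after which the device is brought online.
	 */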
2374 switch (scsi->cdb[0]) {
2375 case TEST_UNIT_READY:
2376 if (status == SAM_STAT_GOOD)
2377 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2378 else if ((status == SAM_STAT_CHECK_CONDITION) &&
2379 (skerr->key == MEDIUM_ERROR))
2380 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
2381 else {
2382 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2383 pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
2384 skdev->name, __func__, __LINE__,
2385 skdev->state);
2386 return;
2387 }
2388 pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
2389 skdev->name, __func__, __LINE__);
2390 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2391 }
2392 break;
2393
2394 case WRITE_BUFFER:
2395 if (status == SAM_STAT_GOOD)
2396 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
2397 else {
2398 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2399 pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
2400 skdev->name, __func__, __LINE__,
2401 skdev->state);
2402 return;
2403 }
2404 pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
2405 skdev->name, __func__, __LINE__);
2406 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2407 }
2408 break;
2409
2410 case READ_BUFFER:
2411 if (status == SAM_STAT_GOOD) {
2412 if (skd_chk_read_buf(skdev, skspcl) == 0)
2413 skd_send_internal_skspcl(skdev, skspcl,
2414 READ_CAPACITY);
2415 else {
2416 pr_err(
2417 "(%s):*** W/R Buffer mismatch %d ***\n",
2418 skd_name(skdev), skdev->connect_retries);
2419 if (skdev->connect_retries <
2420 SKD_MAX_CONNECT_RETRIES) {
2421 skdev->connect_retries++;
2422 skd_soft_reset(skdev);
2423 } else {
2424 pr_err(
2425 "(%s): W/R Buffer Connect Error\n",
2426 skd_name(skdev));
2427 return;
2428 }
2429 }
2430
2431 } else {
2432 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
2433 pr_debug("%s:%s:%d "
2434 "read buffer failed, don't send anymore state 0x%x\n",
2435 skdev->name, __func__, __LINE__,
2436 skdev->state);
2437 return;
2438 }
2439 pr_debug("%s:%s:%d "
2440 "**** read buffer failed, retry skerr\n",
2441 skdev->name, __func__, __LINE__);
2442 skd_send_internal_skspcl(skdev, skspcl, 0x00);
2443 }
2444 break;
2445
2446 case READ_CAPACITY:
2447 skdev->read_cap_is_valid = 0;
2448 if (status == SAM_STAT_GOOD) {
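			/* READ CAPACITY(10) data: bytes 0..3 last LBA, bytes 4..7 block size, big-endian */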
2449 skdev->read_cap_last_lba =
2450 (buf[0] << 24) | (buf[1] << 16) |
2451 (buf[2] << 8) | buf[3];
2452 skdev->read_cap_blocksize =
2453 (buf[4] << 24) | (buf[5] << 16) |
2454 (buf[6] << 8) | buf[7];
2455
2456 pr_debug("%s:%s:%d last lba %d, bs %d\n",
2457 skdev->name, __func__, __LINE__,
2458 skdev->read_cap_last_lba,
2459 skdev->read_cap_blocksize);
2460
2461 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2462
2463 skdev->read_cap_is_valid = 1;
2464
2465 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2466 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
2467 (skerr->key == MEDIUM_ERROR)) {
2468 skdev->read_cap_last_lba = ~0;
2469 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
2470 pr_debug("%s:%s:%d "
2471 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
2472 skdev->name, __func__, __LINE__);
2473 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2474 } else {
2475 pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
2476 skdev->name, __func__, __LINE__);
2477 skd_send_internal_skspcl(skdev, skspcl,
2478 TEST_UNIT_READY);
2479 }
2480 break;
2481
2482 case INQUIRY:
2483 skdev->inquiry_is_valid = 0;
2484 if (status == SAM_STAT_GOOD) {
2485 skdev->inquiry_is_valid = 1;
2486
2487 for (i = 0; i < 12; i++)
2488 skdev->inq_serial_num[i] = buf[i + 4];
2489 skdev->inq_serial_num[12] = 0;
2490 }
2491
2492 if (skd_unquiesce_dev(skdev) < 0)
2493 pr_debug("%s:%s:%d **** failed to ONLINE device\n",
2494 skdev->name, __func__, __LINE__);
2495 /* connection is complete */
2496 skdev->connect_retries = 0;
2497 break;
2498
2499 case SYNCHRONIZE_CACHE:
2500 if (status == SAM_STAT_GOOD)
2501 skdev->sync_done = 1;
2502 else
2503 skdev->sync_done = -1;
2504 wake_up_interruptible(&skdev->waitq);
2505 break;
2506
2507 default:
2508 SKD_ASSERT("we didn't send this");
2509 }
2510}
2511
2512/*
2513 *****************************************************************************
2514 * FIT MESSAGES
2515 *****************************************************************************
2516 */
2517
2518static void skd_send_fitmsg(struct skd_device *skdev,
2519 struct skd_fitmsg_context *skmsg)
2520{
2521 u64 qcmd;
2522 struct fit_msg_hdr *fmh;
2523
2524 pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
2525 skdev->name, __func__, __LINE__,
2526 skmsg->mb_dma_address, skdev->in_flight);
2527 pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
2528 skdev->name, __func__, __LINE__,
2529 skmsg->msg_buf, skmsg->offset);
2530
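	/*
	 * The queue command is the FIT msg DMA address with the queue id
	 * and (below) a message size code OR'd into it.
	 */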
2531 qcmd = skmsg->mb_dma_address;
2532 qcmd |= FIT_QCMD_QID_NORMAL;
2533
2534 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2535 skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2536
2537 if (unlikely(skdev->dbg_level > 1)) {
2538 u8 *bp = (u8 *)skmsg->msg_buf;
2539 int i;
2540 for (i = 0; i < skmsg->length; i += 8) {
2541 pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
2542 "%02x %02x %02x %02x\n",
2543 skdev->name, __func__, __LINE__,
2544 i, bp[i + 0], bp[i + 1], bp[i + 2],
2545 bp[i + 3], bp[i + 4], bp[i + 5],
2546 bp[i + 6], bp[i + 7]);
2547 if (i == 0)
2548 i = 64 - 8;
2549 }
2550 }
2551
2552 if (skmsg->length > 256)
2553 qcmd |= FIT_QCMD_MSGSIZE_512;
2554 else if (skmsg->length > 128)
2555 qcmd |= FIT_QCMD_MSGSIZE_256;
2556 else if (skmsg->length > 64)
2557 qcmd |= FIT_QCMD_MSGSIZE_128;
2558 else
2559 /*
2560 * This makes no sense because the FIT msg header is
2561 * 64 bytes. If the msg is only 64 bytes long it has
2562 * no payload.
2563 */
2564 qcmd |= FIT_QCMD_MSGSIZE_64;
2565
2566 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2567
2568}
2569
2570static void skd_send_special_fitmsg(struct skd_device *skdev,
2571 struct skd_special_context *skspcl)
2572{
2573 u64 qcmd;
2574
2575 if (unlikely(skdev->dbg_level > 1)) {
2576 u8 *bp = (u8 *)skspcl->msg_buf;
2577 int i;
2578
2579 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2580 pr_debug("%s:%s:%d spcl[%2d] %02x %02x %02x %02x "
2581 "%02x %02x %02x %02x\n",
2582 skdev->name, __func__, __LINE__, i,
2583 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
2584 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
2585 if (i == 0)
2586 i = 64 - 8;
2587 }
2588
2589 pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2590 skdev->name, __func__, __LINE__,
2591 skspcl, skspcl->req.id, skspcl->req.sksg_list,
2592 skspcl->req.sksg_dma_address);
2593 for (i = 0; i < skspcl->req.n_sg; i++) {
2594 struct fit_sg_descriptor *sgd =
2595 &skspcl->req.sksg_list[i];
2596
2597 pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
2598 "addr=0x%llx next=0x%llx\n",
2599 skdev->name, __func__, __LINE__,
2600 i, sgd->byte_count, sgd->control,
2601 sgd->host_side_addr, sgd->next_desc_ptr);
2602 }
2603 }
2604
2605 /*
2606 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2607 * and one 64-byte SSDI command.
2608 */
2609 qcmd = skspcl->mb_dma_address;
2610 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2611
2612 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2613}
2614
2615/*
2616 *****************************************************************************
2617 * COMPLETION QUEUE
2618 *****************************************************************************
2619 */
2620
2621static void skd_complete_other(struct skd_device *skdev,
2622 volatile struct fit_completion_entry_v1 *skcomp,
2623 volatile struct fit_comp_error_info *skerr);
2624
2625
2626static void skd_requeue_request(struct skd_device *skdev,
2627 struct skd_request_context *skreq);
2628
2629struct sns_info {
2630 u8 type;
2631 u8 stat;
2632 u8 key;
2633 u8 asc;
2634 u8 ascq;
2635 u8 mask;
2636 enum skd_check_status_action action;
2637};
2638
2639static struct sns_info skd_chkstat_table[] = {
2640 /* Good */
2641 { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
2642 SKD_CHECK_STATUS_REPORT_GOOD },
2643
2644 /* Smart alerts */
2645 { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
2646 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2647 { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
2648 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2649 { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
2650 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2651
2652 /* Retry (with limits) */
2653 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
2654 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2655 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
2656 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2657 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
2658 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2659 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
2660 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2661
2662 /* Busy (or about to be) */
2663 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
2664 SKD_CHECK_STATUS_BUSY_IMMINENT },
2665};
2666
2667/*
2668 * Look up status and sense data to decide how to handle the error
2669 * from the device.
2670 * mask says which fields must match, e.g. mask=0x18 means check
2671 * type and stat, ignore key, asc, ascq.
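 * Mask bits: 0x10 = type, 0x08 = stat, 0x04 = key, 0x02 = asc (code),
 * 0x01 = ascq (qual).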
2672 */
2673
2674static enum skd_check_status_action skd_check_status(struct skd_device *skdev,
2675 u8 cmp_status,
2676 volatile struct fit_comp_error_info *skerr)
2677{
2678 int i, n;
2679
2680 pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2681 skd_name(skdev), skerr->key, skerr->code, skerr->qual,
2682 skerr->fruc);
2683
2684 pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2685 skdev->name, __func__, __LINE__, skerr->type, cmp_status,
2686 skerr->key, skerr->code, skerr->qual, skerr->fruc);
2687
2688 /* Does the info match an entry in the good category? */
2689 n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
2690 for (i = 0; i < n; i++) {
2691 struct sns_info *sns = &skd_chkstat_table[i];
2692
2693 if (sns->mask & 0x10)
2694 if (skerr->type != sns->type)
2695 continue;
2696
2697 if (sns->mask & 0x08)
2698 if (cmp_status != sns->stat)
2699 continue;
2700
2701 if (sns->mask & 0x04)
2702 if (skerr->key != sns->key)
2703 continue;
2704
2705 if (sns->mask & 0x02)
2706 if (skerr->code != sns->asc)
2707 continue;
2708
2709 if (sns->mask & 0x01)
2710 if (skerr->qual != sns->ascq)
2711 continue;
2712
2713 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2714 pr_err("(%s): SMART Alert: sense key/asc/ascq "
2715 "%02x/%02x/%02x\n",
2716 skd_name(skdev), skerr->key,
2717 skerr->code, skerr->qual);
2718 }
2719 return sns->action;
2720 }
2721
2722 /* No other match, so nonzero status means error,
2723 * zero status means good
2724 */
2725 if (cmp_status) {
2726 pr_debug("%s:%s:%d status check: error\n",
2727 skdev->name, __func__, __LINE__);
2728 return SKD_CHECK_STATUS_REPORT_ERROR;
2729 }
2730
2731 pr_debug("%s:%s:%d status check good default\n",
2732 skdev->name, __func__, __LINE__);
2733 return SKD_CHECK_STATUS_REPORT_GOOD;
2734}
2735
2736static void skd_resolve_req_exception(struct skd_device *skdev,
2737 struct skd_request_context *skreq)
2738{
2739 u8 cmp_status = skreq->completion.status;
2740
2741 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2742 case SKD_CHECK_STATUS_REPORT_GOOD:
2743 case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2744 skd_end_request(skdev, skreq, 0);
2745 break;
2746
2747 case SKD_CHECK_STATUS_BUSY_IMMINENT:
2748 skd_log_skreq(skdev, skreq, "retry(busy)");
2749 skd_requeue_request(skdev, skreq);
2750 pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
2751 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2752 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2753 skd_quiesce_dev(skdev);
2754 break;
2755
2756 case SKD_CHECK_STATUS_REQUEUE_REQUEST:
2757 if (!skd_bio) {
2758 if ((unsigned long) ++skreq->req->special <
2759 SKD_MAX_RETRIES) {
2760 skd_log_skreq(skdev, skreq, "retry");
2761 skd_requeue_request(skdev, skreq);
2762 break;
2763 }
2764 }
2765 /* fall through to report error */
2766
2767 case SKD_CHECK_STATUS_REPORT_ERROR:
2768 default:
2769 skd_end_request(skdev, skreq, -EIO);
2770 break;
2771 }
2772}
2773
2774static void skd_requeue_request(struct skd_device *skdev,
2775 struct skd_request_context *skreq)
2776{
2777 if (!skd_bio) {
2778 blk_requeue_request(skdev->queue, skreq->req);
2779 } else {
2780 bio_list_add_head(&skdev->bio_queue, skreq->bio);
2781 skreq->bio = NULL;
2782 }
2783}
2784
2785
2786
2787/* assume spinlock is already held */
2788static void skd_release_skreq(struct skd_device *skdev,
2789 struct skd_request_context *skreq)
2790{
2791 u32 msg_slot;
2792 struct skd_fitmsg_context *skmsg;
2793
2794 u32 timo_slot;
2795
2796 /*
2797 * Reclaim the FIT msg buffer if this is
2798 * the first of the requests it carried to
2799 * be completed. The FIT msg buffer used to
2800 * send this request cannot be reused until
2801 * we are sure the s1120 card has copied
2802 * it to its memory. The FIT msg might have
2803 * contained several requests. As soon as
2804 * any of them are completed we know that
2805 * the entire FIT msg was transferred.
2806 * Only the first completed request will
2807 * match the FIT msg buffer id. The FIT
2808 * msg buffer id is immediately updated.
2809 * When subsequent requests complete the FIT
2810 * msg buffer id won't match, so we know
2811 * quite cheaply that it is already done.
2812 */
2813 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2814 SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2815
2816 skmsg = &skdev->skmsg_table[msg_slot];
2817 if (skmsg->id == skreq->fitmsg_id) {
2818 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2819 SKD_ASSERT(skmsg->outstanding > 0);
2820 skmsg->outstanding--;
2821 if (skmsg->outstanding == 0) {
2822 skmsg->state = SKD_MSG_STATE_IDLE;
2823 skmsg->id += SKD_ID_INCR;
2824 skmsg->next = skdev->skmsg_free_list;
2825 skdev->skmsg_free_list = skmsg;
2826 }
2827 }
2828
2829 /*
2830 * Decrease the number of active requests.
2831 * Also decrements the count in the timeout slot.
2832 */
2833 SKD_ASSERT(skdev->in_flight > 0);
2834 skdev->in_flight -= 1;
2835
2836 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2837 SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2838 skdev->timeout_slot[timo_slot] -= 1;
2839
2840 /*
2841 * Reset backpointer
2842 */
2843 if (likely(!skd_bio))
2844 skreq->req = NULL;
2845 else
2846 skreq->bio = NULL;
2847
2848
2849 /*
2850 * Reclaim the skd_request_context
2851 */
2852 skreq->state = SKD_REQ_STATE_IDLE;
2853 skreq->id += SKD_ID_INCR;
2854 skreq->next = skdev->skreq_free_list;
2855 skdev->skreq_free_list = skreq;
2856}
2857
2858#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
2859
2860static void skd_do_inq_page_00(struct skd_device *skdev,
2861 volatile struct fit_completion_entry_v1 *skcomp,
2862 volatile struct fit_comp_error_info *skerr,
2863 uint8_t *cdb, uint8_t *buf)
2864{
2865 uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2866
2867 /* Caller requested "supported pages". The driver needs to insert
2868 * its page.
2869 */
2870 pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
2871 skdev->name, __func__, __LINE__);
2872
2873 /* If the device rejected the request because the CDB was
2874 * improperly formed, then just leave.
2875 */
2876 if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2877 skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2878 return;
2879
2880 /* Get the amount of space the caller allocated */
2881 max_bytes = (cdb[3] << 8) | cdb[4];
2882
2883 /* Get the number of pages actually returned by the device */
2884 drive_pages = (buf[2] << 8) | buf[3];
2885 drive_bytes = drive_pages + 4;
2886 new_size = drive_pages + 1;
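	/* +4 skips the 4-byte VPD header; new_size counts the driver's extra page */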
2887
2888 /* Supported pages must be in numerical order, so find where
2889 * the driver page needs to be inserted into the list of
2890 * pages returned by the device.
2891 */
2892 for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2893 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2894 return; /* Device using this page code. abort */
2895 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2896 break;
2897 }
2898
2899 if (insert_pt < max_bytes) {
2900 uint16_t u;
2901
2902 /* Shift everything up one byte to make room. */
2903 for (u = new_size + 3; u > insert_pt; u--)
2904 buf[u] = buf[u - 1];
2905 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2906
2907 /* SCSI byte order increment of num_returned_bytes by 1 */
2908 skcomp->num_returned_bytes =
2909 be32_to_cpu(skcomp->num_returned_bytes) + 1;
2910 skcomp->num_returned_bytes =
2911 be32_to_cpu(skcomp->num_returned_bytes);
2912 }
2913
2914 /* update page length field to reflect the driver's page too */
2915 buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2916 buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2917}
2918
2919static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2920{
2921 int pcie_reg;
2922 u16 pci_bus_speed;
2923 u8 pci_lanes;
2924
2925 pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2926 if (pcie_reg) {
2927 u16 linksta;
2928 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2929
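		/*
		 * PCIe Link Status register: bits 3:0 = current link speed
		 * (1 = 2.5, 2 = 5, 3 = 8 GT/s), bits 9:4 = negotiated width.
		 */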
2930 pci_bus_speed = linksta & 0xF;
2931 pci_lanes = (linksta & 0x3F0) >> 4;
2932 } else {
2933 *speed = STEC_LINK_UNKNOWN;
2934 *width = 0xFF;
2935 return;
2936 }
2937
2938 switch (pci_bus_speed) {
2939 case 1:
2940 *speed = STEC_LINK_2_5GTS;
2941 break;
2942 case 2:
2943 *speed = STEC_LINK_5GTS;
2944 break;
2945 case 3:
2946 *speed = STEC_LINK_8GTS;
2947 break;
2948 default:
2949 *speed = STEC_LINK_UNKNOWN;
2950 break;
2951 }
2952
2953 if (pci_lanes <= 0x20)
2954 *width = pci_lanes;
2955 else
2956 *width = 0xFF;
2957}
2958
2959static void skd_do_inq_page_da(struct skd_device *skdev,
2960 volatile struct fit_completion_entry_v1 *skcomp,
2961 volatile struct fit_comp_error_info *skerr,
2962 uint8_t *cdb, uint8_t *buf)
2963{
2964 unsigned max_bytes;
2965 struct driver_inquiry_data inq;
2966 u16 val;
2967
2968 pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
2969 skdev->name, __func__, __LINE__);
2970
2971 memset(&inq, 0, sizeof(inq));
2972
2973 inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2974
2975 if (skdev->pdev && skdev->pdev->bus) {
2976 skd_get_link_info(skdev->pdev,
2977 &inq.pcie_link_speed, &inq.pcie_link_lanes);
2978 inq.pcie_bus_number = cpu_to_be16(skdev->pdev->bus->number);
2979 inq.pcie_device_number = PCI_SLOT(skdev->pdev->devfn);
2980 inq.pcie_function_number = PCI_FUNC(skdev->pdev->devfn);
2981
2982 pci_read_config_word(skdev->pdev, PCI_VENDOR_ID, &val);
2983 inq.pcie_vendor_id = cpu_to_be16(val);
2984
2985 pci_read_config_word(skdev->pdev, PCI_DEVICE_ID, &val);
2986 inq.pcie_device_id = cpu_to_be16(val);
2987
2988 pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_VENDOR_ID,
2989 &val);
2990 inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2991
2992 pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_ID, &val);
2993 inq.pcie_subsystem_device_id = cpu_to_be16(val);
2994 } else {
2995 inq.pcie_bus_number = 0xFFFF;
2996 inq.pcie_device_number = 0xFF;
2997 inq.pcie_function_number = 0xFF;
2998 inq.pcie_link_speed = 0xFF;
2999 inq.pcie_link_lanes = 0xFF;
3000 inq.pcie_vendor_id = 0xFFFF;
3001 inq.pcie_device_id = 0xFFFF;
3002 inq.pcie_subsystem_vendor_id = 0xFFFF;
3003 inq.pcie_subsystem_device_id = 0xFFFF;
3004 }
3005
3006 /* Driver version, fixed length, padded with spaces on the right */
3007 inq.driver_version_length = sizeof(inq.driver_version);
3008 memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
3009 memcpy(inq.driver_version, DRV_VER_COMPL,
3010 min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
3011
3012 inq.page_length = cpu_to_be16((sizeof(inq) - 4));
3013
3014 /* Clear the error set by the device */
3015 skcomp->status = SAM_STAT_GOOD;
3016 memset((void *)skerr, 0, sizeof(*skerr));
3017
3018 /* copy response into output buffer */
3019 max_bytes = (cdb[3] << 8) | cdb[4];
3020 memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
3021
3022 skcomp->num_returned_bytes =
3023 be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
3024}
3025
3026static void skd_do_driver_inq(struct skd_device *skdev,
3027 volatile struct fit_completion_entry_v1 *skcomp,
3028 volatile struct fit_comp_error_info *skerr,
3029 uint8_t *cdb, uint8_t *buf)
3030{
3031 if (!buf)
3032 return;
3033 else if (cdb[0] != INQUIRY)
3034 return; /* Not an INQUIRY */
3035 else if ((cdb[1] & 1) == 0)
3036 return; /* EVPD not set */
3037 else if (cdb[2] == 0)
3038 /* Need to add driver's page to supported pages list */
3039 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
3040 else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
3041 /* Caller requested driver's page */
3042 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
3043}
3044
3045static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
3046{
3047 if (!sg)
3048 return NULL;
3049 if (!sg_page(sg))
3050 return NULL;
3051 return sg_virt(sg);
3052}
3053
3054static void skd_process_scsi_inq(struct skd_device *skdev,
3055 volatile struct fit_completion_entry_v1
3056 *skcomp,
3057 volatile struct fit_comp_error_info *skerr,
3058 struct skd_special_context *skspcl)
3059{
3060 uint8_t *buf;
3061 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
3062 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
3063
3064 dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
3065 skspcl->req.sg_data_dir);
3066 buf = skd_sg_1st_page_ptr(skspcl->req.sg);
3067
3068 if (buf)
3069 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
3070}
3071
3072
3073static int skd_isr_completion_posted(struct skd_device *skdev,
3074 int limit, int *enqueued)
3075{
3076 volatile struct fit_completion_entry_v1 *skcmp = NULL;
3077 volatile struct fit_comp_error_info *skerr;
3078 u16 req_id;
3079 u32 req_slot;
3080 struct skd_request_context *skreq;
3081 u16 cmp_cntxt = 0;
3082 u8 cmp_status = 0;
3083 u8 cmp_cycle = 0;
3084 u32 cmp_bytes = 0;
3085 int rc = 0;
3086 int processed = 0;
3087 int ret;
3088
3089
3090 for (;; ) {
3091 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
3092
3093 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
3094 cmp_cycle = skcmp->cycle;
3095 cmp_cntxt = skcmp->tag;
3096 cmp_status = skcmp->status;
3097 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
3098
3099 skerr = &skdev->skerr_table[skdev->skcomp_ix];
3100
3101 pr_debug("%s:%s:%d "
3102 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
3103 "busy=%d rbytes=0x%x proto=%d\n",
3104 skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
3105 skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
3106 skdev->in_flight, cmp_bytes, skdev->proto_ver);
3107
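		/*
		 * An entry whose cycle value does not match the expected
		 * skcomp_cycle has not been written by the device yet,
		 * so there are no more new completions to process.
		 */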
3108 if (cmp_cycle != skdev->skcomp_cycle) {
3109 pr_debug("%s:%s:%d end of completions\n",
3110 skdev->name, __func__, __LINE__);
3111 break;
3112 }
3113 /*
3114 * Update the completion queue head index and possibly
3115 * the completion cycle count. 8-bit wrap-around.
3116 */
3117 skdev->skcomp_ix++;
3118 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
3119 skdev->skcomp_ix = 0;
3120 skdev->skcomp_cycle++;
3121 }
3122
3123 /*
3124 * The command context is a unique 32-bit ID. The low order
3125 * bits help locate the request. The request is usually a
3126 * r/w request (see skd_start() above) or a special request.
3127 */
3128 req_id = cmp_cntxt;
3129 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
3130
3131 /* Is this other than a r/w request? */
3132 if (req_slot >= skdev->num_req_context) {
3133 /*
3134 * This is not a completion for a r/w request.
3135 */
3136 skd_complete_other(skdev, skcmp, skerr);
3137 continue;
3138 }
3139
3140 skreq = &skdev->skreq_table[req_slot];
3141
3142 /*
3143 * Make sure the request ID for the slot matches.
3144 */
3145 if (skreq->id != req_id) {
3146 pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
3147 skdev->name, __func__, __LINE__,
3148 req_id, skreq->id);
3149 {
3150 u16 new_id = cmp_cntxt;
3151 pr_err("(%s): Completion mismatch "
3152 "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
3153 skd_name(skdev), req_id,
3154 skreq->id, new_id);
3155
3156 continue;
3157 }
3158 }
3159
3160 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
3161
3162 if (skreq->state == SKD_REQ_STATE_ABORTED) {
3163 pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
3164 skdev->name, __func__, __LINE__,
3165 skreq, skreq->id);
3166 /* a previously timed out command can
3167 * now be cleaned up */
3168 skd_release_skreq(skdev, skreq);
3169 continue;
3170 }
3171
3172 skreq->completion = *skcmp;
3173 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
3174 skreq->err_info = *skerr;
3175 skd_log_check_status(skdev, cmp_status, skerr->key,
3176 skerr->code, skerr->qual,
3177 skerr->fruc);
3178 }
3179 /* Release DMA resources for the request. */
3180 if (skreq->n_sg > 0)
3181 skd_postop_sg_list(skdev, skreq);
3182
3183 if (((!skd_bio) && !skreq->req) ||
3184 ((skd_bio) && !skreq->bio)) {
3185 pr_debug("%s:%s:%d NULL backptr skdreq %p, "
3186 "req=0x%x req_id=0x%x\n",
3187 skdev->name, __func__, __LINE__,
3188 skreq, skreq->id, req_id);
3189 } else {
3190 /*
3191 * Capture the outcome and post it back to the
3192 * native request.
3193 */
3194 if (likely(cmp_status == SAM_STAT_GOOD)) {
3195 if (unlikely(skreq->flush_cmd)) {
3196 if (skd_bio) {
3197 /* if empty size bio, we are all done */
3198 if (bio_sectors(skreq->bio) == 0) {
3199 skd_end_request(skdev, skreq, 0);
3200 } else {
3201 ret = skd_flush_cmd_enqueue(skdev, (void *)skreq->bio);
3202 if (ret != 0) {
3203 pr_err("Failed to enqueue flush bio with Data. Err=%d.\n", ret);
3204 skd_end_request(skdev, skreq, ret);
3205 } else {
3206 ((*enqueued)++);
3207 }
3208 }
3209 } else {
3210 skd_end_request(skdev, skreq, 0);
3211 }
3212 } else {
3213 skd_end_request(skdev, skreq, 0);
3214 }
3215 } else {
3216 skd_resolve_req_exception(skdev, skreq);
3217 }
3218 }
3219
3220 /*
3221 * Release the skreq, its FIT msg (if one), timeout slot,
3222 * and queue depth.
3223 */
3224 skd_release_skreq(skdev, skreq);
3225
3226 /* skd_isr_comp_limit equal zero means no limit */
3227 if (limit) {
3228 if (++processed >= limit) {
3229 rc = 1;
3230 break;
3231 }
3232 }
3233 }
3234
3235 if ((skdev->state == SKD_DRVR_STATE_PAUSING)
3236 && (skdev->in_flight) == 0) {
3237 skdev->state = SKD_DRVR_STATE_PAUSED;
3238 wake_up_interruptible(&skdev->waitq);
3239 }
3240
3241 return rc;
3242}
3243
3244static void skd_complete_other(struct skd_device *skdev,
3245 volatile struct fit_completion_entry_v1 *skcomp,
3246 volatile struct fit_comp_error_info *skerr)
3247{
3248 u32 req_id = 0;
3249 u32 req_table;
3250 u32 req_slot;
3251 struct skd_special_context *skspcl;
3252
3253 req_id = skcomp->tag;
3254 req_table = req_id & SKD_ID_TABLE_MASK;
3255 req_slot = req_id & SKD_ID_SLOT_MASK;
3256
3257 pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
3258 skdev->name, __func__, __LINE__,
3259 req_table, req_id, req_slot);
3260
3261 /*
3262 * Based on the request id, determine how to dispatch this completion.
3263 * This switch/case finds the good cases and forwards the
3264 * completion entry. Errors are reported below the switch.
3265 */
3266 switch (req_table) {
3267 case SKD_ID_RW_REQUEST:
3268 /*
3269 * The caller, skd_completion_posted_isr() above,
3270 * handles r/w requests. The only way we get here
3271 * is if the req_slot is out of bounds.
3272 */
3273 break;
3274
3275 case SKD_ID_SPECIAL_REQUEST:
3276 /*
3277 * Make sure the req_slot is in bounds and that the id
3278 * matches.
3279 */
3280 if (req_slot < skdev->n_special) {
3281 skspcl = &skdev->skspcl_table[req_slot];
3282 if (skspcl->req.id == req_id &&
3283 skspcl->req.state == SKD_REQ_STATE_BUSY) {
3284 skd_complete_special(skdev,
3285 skcomp, skerr, skspcl);
3286 return;
3287 }
3288 }
3289 break;
3290
3291 case SKD_ID_INTERNAL:
3292 if (req_slot == 0) {
3293 skspcl = &skdev->internal_skspcl;
3294 if (skspcl->req.id == req_id &&
3295 skspcl->req.state == SKD_REQ_STATE_BUSY) {
3296 skd_complete_internal(skdev,
3297 skcomp, skerr, skspcl);
3298 return;
3299 }
3300 }
3301 break;
3302
3303 case SKD_ID_FIT_MSG:
3304 /*
3305 * These ids should never appear in a completion record.
3306 */
3307 break;
3308
3309 default:
3310 /*
3311 * These ids should never appear anywhere.
3312 */
3313 break;
3314 }
3315
3316 /*
3317 * If we get here it is a bad or stale id.
3318 */
3319}
3320
3321static void skd_complete_special(struct skd_device *skdev,
3322 volatile struct fit_completion_entry_v1
3323 *skcomp,
3324 volatile struct fit_comp_error_info *skerr,
3325 struct skd_special_context *skspcl)
3326{
3327 pr_debug("%s:%s:%d completing special request %p\n",
3328 skdev->name, __func__, __LINE__, skspcl);
3329 if (skspcl->orphaned) {
3330 /* Discard orphaned request */
3331 /* ?: Can this release directly or does it need
3332 * to use a worker? */
3333 pr_debug("%s:%s:%d release orphaned %p\n",
3334 skdev->name, __func__, __LINE__, skspcl);
3335 skd_release_special(skdev, skspcl);
3336 return;
3337 }
3338
3339 skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
3340
3341 skspcl->req.state = SKD_REQ_STATE_COMPLETED;
3342 skspcl->req.completion = *skcomp;
3343 skspcl->req.err_info = *skerr;
3344
3345 skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
3346 skerr->code, skerr->qual, skerr->fruc);
3347
3348 wake_up_interruptible(&skdev->waitq);
3349}
3350
3351/* assume spinlock is already held */
3352static void skd_release_special(struct skd_device *skdev,
3353 struct skd_special_context *skspcl)
3354{
3355 int i, was_depleted;
3356
3357 for (i = 0; i < skspcl->req.n_sg; i++) {
3358
3359 struct page *page = sg_page(&skspcl->req.sg[i]);
3360 __free_page(page);
3361 }
3362
3363 was_depleted = (skdev->skspcl_free_list == NULL);
3364
3365 skspcl->req.state = SKD_REQ_STATE_IDLE;
3366 skspcl->req.id += SKD_ID_INCR;
3367 skspcl->req.next =
3368 (struct skd_request_context *)skdev->skspcl_free_list;
3369 skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
3370
3371 if (was_depleted) {
3372 pr_debug("%s:%s:%d skspcl was depleted\n",
3373 skdev->name, __func__, __LINE__);
3374 /* Free list was depleted. There might be waiters. */
3375 wake_up_interruptible(&skdev->waitq);
3376 }
3377}
3378
3379static void skd_reset_skcomp(struct skd_device *skdev)
3380{
3381 u32 nbytes;
3382 struct fit_completion_entry_v1 *skcomp;
3383
3384 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
3385 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
3386
3387 memset(skdev->skcomp_table, 0, nbytes);
3388
3389 skdev->skcomp_ix = 0;
3390 skdev->skcomp_cycle = 1;
3391}
3392
3393/*
3394 *****************************************************************************
3395 * INTERRUPTS
3396 *****************************************************************************
3397 */
3398static void skd_completion_worker(struct work_struct *work)
3399{
3400 struct skd_device *skdev =
3401 container_of(work, struct skd_device, completion_worker);
3402 unsigned long flags;
3403 int flush_enqueued = 0;
3404
3405 spin_lock_irqsave(&skdev->lock, flags);
3406
3407 /*
3408 * pass in limit=0, which means no limit..
3409 * process everything in compq
3410 */
3411 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
3412 skd_request_fn(skdev->queue);
3413
3414 spin_unlock_irqrestore(&skdev->lock, flags);
3415}
3416
3417static void skd_isr_msg_from_dev(struct skd_device *skdev);
3418
3419 static irqreturn_t
3420 skd_isr(int irq, void *ptr)
3421{
3422 struct skd_device *skdev;
3423 u32 intstat;
3424 u32 ack;
3425 int rc = 0;
3426 int deferred = 0;
3427 int flush_enqueued = 0;
3428
3429 skdev = (struct skd_device *)ptr;
3430 spin_lock(&skdev->lock);
3431
3432 for (;; ) {
3433 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3434
3435 ack = FIT_INT_DEF_MASK;
3436 ack &= intstat;
3437
3438 pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
3439 skdev->name, __func__, __LINE__, intstat, ack);
3440
3441 /* As long as there is an int pending on device, keep
3442 * running loop. When none, get out, but if we've never
3443 * done any processing, call completion handler?
3444 */
3445 if (ack == 0) {
3446 /* No interrupts on device, but run the completion
3447 * processor anyway?
3448 */
3449 if (rc == 0)
3450 if (likely (skdev->state
3451 == SKD_DRVR_STATE_ONLINE))
3452 deferred = 1;
3453 break;
3454 }
3455
3456 rc = IRQ_HANDLED;
3457
3458 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
3459
3460 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
3461 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
3462 if (intstat & FIT_ISH_COMPLETION_POSTED) {
3463 /*
3464 * If we have already deferred completion
3465 * processing, don't bother running it again
3466 */
3467 if (deferred == 0)
3468 deferred =
3469 skd_isr_completion_posted(skdev,
3470 skd_isr_comp_limit, &flush_enqueued);
3471 }
3472
3473 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
3474 skd_isr_fwstate(skdev);
3475 if (skdev->state == SKD_DRVR_STATE_FAULT ||
3476 skdev->state ==
3477 SKD_DRVR_STATE_DISAPPEARED) {
3478 spin_unlock(&skdev->lock);
3479 return rc;
3480 }
3481 }
3482
3483 if (intstat & FIT_ISH_MSG_FROM_DEV)
3484 skd_isr_msg_from_dev(skdev);
3485 }
3486 }
3487
3488 if (unlikely(flush_enqueued))
3489 skd_request_fn(skdev->queue);
3490
3491 if (deferred)
3492 schedule_work(&skdev->completion_worker);
3493 else if (!flush_enqueued)
3494 skd_request_fn(skdev->queue);
3495
3496 spin_unlock(&skdev->lock);
3497
3498 return rc;
3499}
3500
3501
3502static void skd_drive_fault(struct skd_device *skdev)
3503{
3504 skdev->state = SKD_DRVR_STATE_FAULT;
3505 pr_err("(%s): Drive FAULT\n", skd_name(skdev));
3506}
3507
3508static void skd_drive_disappeared(struct skd_device *skdev)
3509{
3510 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
3511 pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
3512}
3513
3514static void skd_isr_fwstate(struct skd_device *skdev)
3515{
3516 u32 sense;
3517 u32 state;
3518 u32 mtd;
3519 int prev_driver_state = skdev->state;
3520
3521 sense = SKD_READL(skdev, FIT_STATUS);
3522 state = sense & FIT_SR_DRIVE_STATE_MASK;
3523
3524 pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
3525 skd_name(skdev),
3526 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3527 skd_drive_state_to_str(state), state);
3528
3529 skdev->drive_state = state;
3530
3531 switch (skdev->drive_state) {
3532 case FIT_SR_DRIVE_INIT:
3533 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
3534 skd_disable_interrupts(skdev);
3535 break;
3536 }
3537 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
3538 skd_recover_requests(skdev, 0);
3539 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
3540 skdev->timer_countdown = SKD_STARTING_TIMO;
3541 skdev->state = SKD_DRVR_STATE_STARTING;
3542 skd_soft_reset(skdev);
3543 break;
3544 }
3545 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
3546 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3547 skdev->last_mtd = mtd;
3548 break;
3549
3550 case FIT_SR_DRIVE_ONLINE:
3551 skdev->cur_max_queue_depth = skd_max_queue_depth;
3552 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
3553 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
3554
3555 skdev->queue_low_water_mark =
3556 skdev->cur_max_queue_depth * 2 / 3 + 1;
3557 if (skdev->queue_low_water_mark < 1)
3558 skdev->queue_low_water_mark = 1;
3559 pr_info(
3560 "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
3561 skd_name(skdev),
3562 skdev->cur_max_queue_depth,
3563 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
3564
3565 skd_refresh_device_data(skdev);
3566 break;
3567
3568 case FIT_SR_DRIVE_BUSY:
3569 skdev->state = SKD_DRVR_STATE_BUSY;
3570 skdev->timer_countdown = SKD_BUSY_TIMO;
3571 skd_quiesce_dev(skdev);
3572 break;
3573 case FIT_SR_DRIVE_BUSY_SANITIZE:
3574 /* set a timer for 3 seconds; we'll abort any unfinished
3575 * commands after it expires
3576 */
3577 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3578 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
3579 skd_start_queue(skdev);
3580 break;
3581 case FIT_SR_DRIVE_BUSY_ERASE:
3582 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3583 skdev->timer_countdown = SKD_BUSY_TIMO;
3584 break;
3585 case FIT_SR_DRIVE_OFFLINE:
3586 skdev->state = SKD_DRVR_STATE_IDLE;
3587 break;
3588 case FIT_SR_DRIVE_SOFT_RESET:
3589 switch (skdev->state) {
3590 case SKD_DRVR_STATE_STARTING:
3591 case SKD_DRVR_STATE_RESTARTING:
3592 /* Expected by a caller of skd_soft_reset() */
3593 break;
3594 default:
3595 skdev->state = SKD_DRVR_STATE_RESTARTING;
3596 break;
3597 }
3598 break;
3599 case FIT_SR_DRIVE_FW_BOOTING:
3600 pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
3601 skdev->name, __func__, __LINE__, skdev->name);
3602 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3603 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3604 break;
3605
3606 case FIT_SR_DRIVE_DEGRADED:
3607 case FIT_SR_PCIE_LINK_DOWN:
3608 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3609 break;
3610
3611 case FIT_SR_DRIVE_FAULT:
3612 skd_drive_fault(skdev);
3613 skd_recover_requests(skdev, 0);
3614 skd_start_queue(skdev);
3615 break;
3616
3617 /* PCIe bus returned all Fs? */
3618 case 0xFF:
3619 pr_info("(%s): state=0x%x sense=0x%x\n",
3620 skd_name(skdev), state, sense);
3621 skd_drive_disappeared(skdev);
3622 skd_recover_requests(skdev, 0);
3623 skd_start_queue(skdev);
3624 break;
3625 default:
3626 /*
3627 * Unknown FW state. Wait for a state we recognize.
3628 */
3629 break;
3630 }
3631 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
3632 skd_name(skdev),
3633 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3634 skd_skdev_state_to_str(skdev->state), skdev->state);
3635}
3636
3637static void skd_recover_requests(struct skd_device *skdev, int requeue)
3638{
3639 int i;
3640
3641 for (i = 0; i < skdev->num_req_context; i++) {
3642 struct skd_request_context *skreq = &skdev->skreq_table[i];
3643
3644 if (skreq->state == SKD_REQ_STATE_BUSY) {
3645 skd_log_skreq(skdev, skreq, "recover");
3646
3647 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3648 if (!skd_bio)
3649 SKD_ASSERT(skreq->req != NULL);
3650 else
3651 SKD_ASSERT(skreq->bio != NULL);
3652
3653 /* Release DMA resources for the request. */
3654 if (skreq->n_sg > 0)
3655 skd_postop_sg_list(skdev, skreq);
3656
3657 if (!skd_bio) {
3658 if (requeue &&
3659 (unsigned long) ++skreq->req->special <
3660 SKD_MAX_RETRIES)
3661 skd_requeue_request(skdev, skreq);
3662 else
3663 skd_end_request(skdev, skreq, -EIO);
3664 } else
3665 skd_end_request(skdev, skreq, -EIO);
3666
3667 if (!skd_bio)
3668 skreq->req = NULL;
3669 else
3670 skreq->bio = NULL;
3671
3672 skreq->state = SKD_REQ_STATE_IDLE;
3673 skreq->id += SKD_ID_INCR;
3674
3675
3676 }
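		/* relink the table entries into a fresh free list in index order */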
3677 if (i > 0)
3678 skreq[-1].next = skreq;
3679 skreq->next = NULL;
3680 }
3681 skdev->skreq_free_list = skdev->skreq_table;
3682
3683 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3684 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3685
3686 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3687 skd_log_skmsg(skdev, skmsg, "salvaged");
3688 SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3689 skmsg->state = SKD_MSG_STATE_IDLE;
3690 skmsg->id += SKD_ID_INCR;
3691 }
3692 if (i > 0)
3693 skmsg[-1].next = skmsg;
3694 skmsg->next = NULL;
3695 }
3696 skdev->skmsg_free_list = skdev->skmsg_table;
3697
3698 for (i = 0; i < skdev->n_special; i++) {
3699 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3700
3701 /* If orphaned, reclaim it because it has already been reported
3702 * to the process as an error (it was just waiting for
3703 * a completion that didn't come, and now it will never come).
3704 * If busy, change to a state that will cause it to error
3705 * out in the wait routine and let it do the normal
3706 * reporting and reclaiming
3707 */
3708 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3709 if (skspcl->orphaned) {
3710 pr_debug("%s:%s:%d orphaned %p\n",
3711 skdev->name, __func__, __LINE__,
3712 skspcl);
3713 skd_release_special(skdev, skspcl);
3714 } else {
3715 pr_debug("%s:%s:%d not orphaned %p\n",
3716 skdev->name, __func__, __LINE__,
3717 skspcl);
3718 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3719 }
3720 }
3721 }
3722 skdev->skspcl_free_list = skdev->skspcl_table;
3723
3724 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3725 skdev->timeout_slot[i] = 0;
3726
3727 skdev->in_flight = 0;
3728}
3729
3730static void skd_isr_msg_from_dev(struct skd_device *skdev)
3731{
3732 u32 mfd;
3733 u32 mtd;
3734 u32 data;
3735
3736 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3737
3738 pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
3739 skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
3740
3741 /* ignore any mtd that is an ack for something we didn't send */
3742 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3743 return;
3744
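	/*
	 * Each case acknowledges one step of the init handshake and issues
	 * the next: FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
	 * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> TIME_STAMP_LO/HI -> ARM_QUEUE.
	 */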
3745 switch (FIT_MXD_TYPE(mfd)) {
3746 case FIT_MTD_FITFW_INIT:
3747 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3748
3749 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3750 pr_err("(%s): protocol mismatch\n",
3751 skdev->name);
3752 pr_err("(%s): got=%d support=%d\n",
3753 skdev->name, skdev->proto_ver,
3754 FIT_PROTOCOL_VERSION_1);
3755 pr_err("(%s): please upgrade driver\n",
3756 skdev->name);
3757 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3758 skd_soft_reset(skdev);
3759 break;
3760 }
3761 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3762 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3763 skdev->last_mtd = mtd;
3764 break;
3765
3766 case FIT_MTD_GET_CMDQ_DEPTH:
3767 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3768 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3769 SKD_N_COMPLETION_ENTRY);
3770 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3771 skdev->last_mtd = mtd;
3772 break;
3773
3774 case FIT_MTD_SET_COMPQ_DEPTH:
3775 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3776 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3777 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3778 skdev->last_mtd = mtd;
3779 break;
3780
3781 case FIT_MTD_SET_COMPQ_ADDR:
3782 skd_reset_skcomp(skdev);
3783 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3784 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3785 skdev->last_mtd = mtd;
3786 break;
3787
3788 case FIT_MTD_CMD_LOG_HOST_ID:
3789 skdev->connect_time_stamp = get_seconds();
3790 data = skdev->connect_time_stamp & 0xFFFF;
3791 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3792 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3793 skdev->last_mtd = mtd;
3794 break;
3795
3796 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3797 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3798 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3799 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3800 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3801 skdev->last_mtd = mtd;
3802 break;
3803
3804 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3805 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3806 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3807 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3808 skdev->last_mtd = mtd;
3809
3810 pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
3811 skd_name(skdev),
3812 skdev->connect_time_stamp, skdev->drive_jiffies);
3813 break;
3814
3815 case FIT_MTD_ARM_QUEUE:
3816 skdev->last_mtd = 0;
3817 /*
3818 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3819 */
3820 break;
3821
3822 default:
3823 break;
3824 }
3825}
3826
3827static void skd_disable_interrupts(struct skd_device *skdev)
3828{
3829 u32 sense;
3830
3831 sense = SKD_READL(skdev, FIT_CONTROL);
3832 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3833 SKD_WRITEL(skdev, sense, FIT_CONTROL);
3834 pr_debug("%s:%s:%d sense 0x%x\n",
3835 skdev->name, __func__, __LINE__, sense);
3836
 3837	/* Note that all 1s are written. A 1-bit means
 3838	 * disable, a 0 means enable.
 3839	 */
3840 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3841}
3842
3843static void skd_enable_interrupts(struct skd_device *skdev)
3844{
3845 u32 val;
3846
3847 /* unmask interrupts first */
3848 val = FIT_ISH_FW_STATE_CHANGE +
3849 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3850
 3851	/* Note that the complement of the mask is written. A 1-bit means
 3852	 * disable, a 0 means enable. */
3853 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
2e44b427 3854 pr_debug("%s:%s:%d interrupt mask=0x%x\n",
3855 skdev->name, __func__, __LINE__, ~val);
e67f86b3
AB
3856
3857 val = SKD_READL(skdev, FIT_CONTROL);
3858 val |= FIT_CR_ENABLE_INTERRUPTS;
2e44b427 3859 pr_debug("%s:%s:%d control=0x%x\n",
3860 skdev->name, __func__, __LINE__, val);
e67f86b3
AB
3861 SKD_WRITEL(skdev, val, FIT_CONTROL);
3862}
3863
3864/*
3865 *****************************************************************************
3866 * START, STOP, RESTART, QUIESCE, UNQUIESCE
3867 *****************************************************************************
3868 */
3869
3870static void skd_soft_reset(struct skd_device *skdev)
3871{
3872 u32 val;
3873
3874 val = SKD_READL(skdev, FIT_CONTROL);
3875 val |= (FIT_CR_SOFT_RESET);
2e44b427 3876 pr_debug("%s:%s:%d control=0x%x\n",
3877 skdev->name, __func__, __LINE__, val);
e67f86b3
AB
3878 SKD_WRITEL(skdev, val, FIT_CONTROL);
3879}
3880
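/*
 * Bring the device up: ack any stale interrupts, sample the drive state
 * from FIT_STATUS, enable interrupts, and then either wait (booting and
 * busy states), soft-reset (INIT/ONLINE), or fail the device outright
 * (FAULT, or an all-ones status meaning the device is gone) so queued
 * requests can be errored out instead of hanging.
 */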
3881static void skd_start_device(struct skd_device *skdev)
3882{
3883 unsigned long flags;
3884 u32 sense;
3885 u32 state;
3886
3887 spin_lock_irqsave(&skdev->lock, flags);
3888
3889 /* ack all ghost interrupts */
3890 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3891
3892 sense = SKD_READL(skdev, FIT_STATUS);
3893
2e44b427 3894 pr_debug("%s:%s:%d initial status=0x%x\n",
3895 skdev->name, __func__, __LINE__, sense);
e67f86b3
AB
3896
3897 state = sense & FIT_SR_DRIVE_STATE_MASK;
3898 skdev->drive_state = state;
3899 skdev->last_mtd = 0;
3900
3901 skdev->state = SKD_DRVR_STATE_STARTING;
3902 skdev->timer_countdown = SKD_STARTING_TIMO;
3903
3904 skd_enable_interrupts(skdev);
3905
3906 switch (skdev->drive_state) {
3907 case FIT_SR_DRIVE_OFFLINE:
3908 pr_err("(%s): Drive offline...\n", skd_name(skdev));
3909 break;
3910
3911 case FIT_SR_DRIVE_FW_BOOTING:
2e44b427 3912 pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
3913 skdev->name, __func__, __LINE__, skdev->name);
e67f86b3
AB
3914 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3915 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3916 break;
3917
3918 case FIT_SR_DRIVE_BUSY_SANITIZE:
3919 pr_info("(%s): Start: BUSY_SANITIZE\n",
3920 skd_name(skdev));
3921 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3922 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3923 break;
3924
3925 case FIT_SR_DRIVE_BUSY_ERASE:
3926 pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
3927 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3928 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3929 break;
3930
3931 case FIT_SR_DRIVE_INIT:
3932 case FIT_SR_DRIVE_ONLINE:
3933 skd_soft_reset(skdev);
3934 break;
3935
3936 case FIT_SR_DRIVE_BUSY:
3937 pr_err("(%s): Drive Busy...\n", skd_name(skdev));
3938 skdev->state = SKD_DRVR_STATE_BUSY;
3939 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3940 break;
3941
3942 case FIT_SR_DRIVE_SOFT_RESET:
3943 pr_err("(%s) drive soft reset in prog\n",
3944 skd_name(skdev));
3945 break;
3946
3947 case FIT_SR_DRIVE_FAULT:
 3948		/* Fault state is bad: a soft reset won't clear it. A hard
 3949		 * reset might work, but it's unclear whether the device
 3950		 * supports one. For now, just fault so the system doesn't hang.
 3951		 */
3952 skd_drive_fault(skdev);
3953 /*start the queue so we can respond with error to requests */
2e44b427 3954 pr_debug("%s:%s:%d starting %s queue\n",
3955 skdev->name, __func__, __LINE__, skdev->name);
e67f86b3
AB
3956 skd_start_queue(skdev);
3957 skdev->gendisk_on = -1;
3958 wake_up_interruptible(&skdev->waitq);
3959 break;
3960
3961 case 0xFF:
3962 /* Most likely the device isn't there or isn't responding
3963 * to the BAR1 addresses. */
3964 skd_drive_disappeared(skdev);
3965 /*start the queue so we can respond with error to requests */
2e44b427 3966 pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
3967 skdev->name, __func__, __LINE__, skdev->name);
e67f86b3
AB
3968 skd_start_queue(skdev);
3969 skdev->gendisk_on = -1;
3970 wake_up_interruptible(&skdev->waitq);
3971 break;
3972
3973 default:
3974 pr_err("(%s) Start: unknown state %x\n",
3975 skd_name(skdev), skdev->drive_state);
3976 break;
3977 }
3978
3979 state = SKD_READL(skdev, FIT_CONTROL);
2e44b427 3980 pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
3981 skdev->name, __func__, __LINE__, state);
e67f86b3
AB
3982
3983 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2e44b427 3984 pr_debug("%s:%s:%d Intr Status=0x%x\n",
3985 skdev->name, __func__, __LINE__, state);
e67f86b3
AB
3986
3987 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
2e44b427 3988 pr_debug("%s:%s:%d Intr Mask=0x%x\n",
3989 skdev->name, __func__, __LINE__, state);
e67f86b3
AB
3990
3991 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2e44b427 3992 pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
3993 skdev->name, __func__, __LINE__, state);
e67f86b3
AB
3994
3995 state = SKD_READL(skdev, FIT_HW_VERSION);
2e44b427 3996 pr_debug("%s:%s:%d HW version=0x%x\n",
3997 skdev->name, __func__, __LINE__, state);
e67f86b3
AB
3998
3999 spin_unlock_irqrestore(&skdev->lock, flags);
4000}
4001
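/*
 * Orderly shutdown: if the drive is online, issue a SYNCHRONIZE CACHE
 * through the internal special context and wait up to ten seconds for
 * it to complete, then disable interrupts, soft-reset the device and
 * poll (every 100 ms, one second total) for it to come back to the
 * FIT_SR_DRIVE_INIT state.
 */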
4002static void skd_stop_device(struct skd_device *skdev)
4003{
4004 unsigned long flags;
4005 struct skd_special_context *skspcl = &skdev->internal_skspcl;
4006 u32 dev_state;
4007 int i;
4008
4009 spin_lock_irqsave(&skdev->lock, flags);
4010
4011 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
4012 pr_err("(%s): skd_stop_device not online no sync\n",
4013 skd_name(skdev));
4014 goto stop_out;
4015 }
4016
4017 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
4018 pr_err("(%s): skd_stop_device no special\n",
4019 skd_name(skdev));
4020 goto stop_out;
4021 }
4022
4023 skdev->state = SKD_DRVR_STATE_SYNCING;
4024 skdev->sync_done = 0;
4025
4026 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
4027
4028 spin_unlock_irqrestore(&skdev->lock, flags);
4029
4030 wait_event_interruptible_timeout(skdev->waitq,
4031 (skdev->sync_done), (10 * HZ));
4032
4033 spin_lock_irqsave(&skdev->lock, flags);
4034
4035 switch (skdev->sync_done) {
4036 case 0:
4037 pr_err("(%s): skd_stop_device no sync\n",
4038 skd_name(skdev));
4039 break;
4040 case 1:
4041 pr_err("(%s): skd_stop_device sync done\n",
4042 skd_name(skdev));
4043 break;
4044 default:
4045 pr_err("(%s): skd_stop_device sync error\n",
4046 skd_name(skdev));
4047 }
4048
4049stop_out:
4050 skdev->state = SKD_DRVR_STATE_STOPPING;
4051 spin_unlock_irqrestore(&skdev->lock, flags);
4052
4053 skd_kill_timer(skdev);
4054
4055 spin_lock_irqsave(&skdev->lock, flags);
4056 skd_disable_interrupts(skdev);
4057
4058 /* ensure all ints on device are cleared */
4059 /* soft reset the device to unload with a clean slate */
4060 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
4061 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
4062
4063 spin_unlock_irqrestore(&skdev->lock, flags);
4064
4065 /* poll every 100ms, 1 second timeout */
4066 for (i = 0; i < 10; i++) {
4067 dev_state =
4068 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
4069 if (dev_state == FIT_SR_DRIVE_INIT)
4070 break;
4071 set_current_state(TASK_INTERRUPTIBLE);
4072 schedule_timeout(msecs_to_jiffies(100));
4073 }
4074
4075 if (dev_state != FIT_SR_DRIVE_INIT)
4076 pr_err("(%s): skd_stop_device state error 0x%02x\n",
4077 skd_name(skdev), dev_state);
4078}
4079
4080/* assume spinlock is held */
4081static void skd_restart_device(struct skd_device *skdev)
4082{
4083 u32 state;
4084
4085 /* ack all ghost interrupts */
4086 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
4087
4088 state = SKD_READL(skdev, FIT_STATUS);
4089
2e44b427 4090 pr_debug("%s:%s:%d drive status=0x%x\n",
4091 skdev->name, __func__, __LINE__, state);
e67f86b3
AB
4092
4093 state &= FIT_SR_DRIVE_STATE_MASK;
4094 skdev->drive_state = state;
4095 skdev->last_mtd = 0;
4096
4097 skdev->state = SKD_DRVR_STATE_RESTARTING;
4098 skdev->timer_countdown = SKD_RESTARTING_TIMO;
4099
4100 skd_soft_reset(skdev);
4101}
4102
4103/* assume spinlock is held */
4104static int skd_quiesce_dev(struct skd_device *skdev)
4105{
4106 int rc = 0;
4107
4108 switch (skdev->state) {
4109 case SKD_DRVR_STATE_BUSY:
4110 case SKD_DRVR_STATE_BUSY_IMMINENT:
2e44b427 4111 pr_debug("%s:%s:%d stopping %s queue\n",
4112 skdev->name, __func__, __LINE__, skdev->name);
e67f86b3
AB
4113 skd_stop_queue(skdev);
4114 break;
4115 case SKD_DRVR_STATE_ONLINE:
4116 case SKD_DRVR_STATE_STOPPING:
4117 case SKD_DRVR_STATE_SYNCING:
4118 case SKD_DRVR_STATE_PAUSING:
4119 case SKD_DRVR_STATE_PAUSED:
4120 case SKD_DRVR_STATE_STARTING:
4121 case SKD_DRVR_STATE_RESTARTING:
4122 case SKD_DRVR_STATE_RESUMING:
4123 default:
4124 rc = -EINVAL;
2e44b427 4125 pr_debug("%s:%s:%d state [%d] not implemented\n",
4126 skdev->name, __func__, __LINE__, skdev->state);
e67f86b3
AB
4127 }
4128 return rc;
4129}
4130
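/*
 * Bring the driver back to ONLINE once the drive reports ready:
 * restart the block queue and wake waiters on skdev->waitq.  If the
 * drive state is anything other than ONLINE, park in BUSY and let the
 * next controller state change restart things.
 */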
4131/* assume spinlock is held */
4132static int skd_unquiesce_dev(struct skd_device *skdev)
4133{
4134 int prev_driver_state = skdev->state;
4135
4136 skd_log_skdev(skdev, "unquiesce");
4137 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
2e44b427 4138 pr_debug("%s:%s:%d **** device already ONLINE\n",
4139 skdev->name, __func__, __LINE__);
e67f86b3
AB
4140 return 0;
4141 }
4142 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
 4143		/*
 4144		 * If there has been a state change to anything other than
 4145		 * ONLINE, rely on the next controller state change to bring
 4146		 * the drive back online and restart the queue.
 4147		 * The BUSY state means the driver is ready to resume normal
 4148		 * processing but is waiting for the controller to become
 4149		 * available.
 4150		 */
4151 skdev->state = SKD_DRVR_STATE_BUSY;
2e44b427 4152 pr_debug("%s:%s:%d drive BUSY state\n",
4153 skdev->name, __func__, __LINE__);
e67f86b3
AB
4154 return 0;
4155 }
4156
 4157	/*
 4158	 * Drive has just come online, driver is either in startup,
 4159	 * paused performing a task, or busy waiting for hardware.
 4160	 */
4161 switch (skdev->state) {
4162 case SKD_DRVR_STATE_PAUSED:
4163 case SKD_DRVR_STATE_BUSY:
4164 case SKD_DRVR_STATE_BUSY_IMMINENT:
4165 case SKD_DRVR_STATE_BUSY_ERASE:
4166 case SKD_DRVR_STATE_STARTING:
4167 case SKD_DRVR_STATE_RESTARTING:
4168 case SKD_DRVR_STATE_FAULT:
4169 case SKD_DRVR_STATE_IDLE:
4170 case SKD_DRVR_STATE_LOAD:
4171 skdev->state = SKD_DRVR_STATE_ONLINE;
4172 pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
4173 skd_name(skdev),
4174 skd_skdev_state_to_str(prev_driver_state),
4175 prev_driver_state, skd_skdev_state_to_str(skdev->state),
4176 skdev->state);
2e44b427 4177 pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
4178 skdev->name, __func__, __LINE__);
4179 pr_debug("%s:%s:%d starting %s queue\n",
4180 skdev->name, __func__, __LINE__, skdev->name);
e67f86b3
AB
4181 pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
4182 skd_start_queue(skdev);
4183 skdev->gendisk_on = 1;
4184 wake_up_interruptible(&skdev->waitq);
4185 break;
4186
4187 case SKD_DRVR_STATE_DISAPPEARED:
4188 default:
2e44b427 4189		pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
4190 skdev->name, __func__, __LINE__,
4191 skdev->state);
e67f86b3
AB
4192 return -EBUSY;
4193 }
4194 return 0;
4195}
4196
4197/*
4198 *****************************************************************************
4199 * PCIe MSI/MSI-X INTERRUPT HANDLERS
4200 *****************************************************************************
4201 */
4202
4203static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
4204{
4205 struct skd_device *skdev = skd_host_data;
4206 unsigned long flags;
4207
4208 spin_lock_irqsave(&skdev->lock, flags);
2e44b427 4209 pr_debug("%s:%s:%d MSIX = 0x%x\n",
4210 skdev->name, __func__, __LINE__,
4211 SKD_READL(skdev, FIT_INT_STATUS_HOST));
e67f86b3
AB
4212 pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
4213 irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
4214 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
4215 spin_unlock_irqrestore(&skdev->lock, flags);
4216 return IRQ_HANDLED;
4217}
4218
4219static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
4220{
4221 struct skd_device *skdev = skd_host_data;
4222 unsigned long flags;
4223
4224 spin_lock_irqsave(&skdev->lock, flags);
2e44b427 4225 pr_debug("%s:%s:%d MSIX = 0x%x\n",
4226 skdev->name, __func__, __LINE__,
4227 SKD_READL(skdev, FIT_INT_STATUS_HOST));
e67f86b3
AB
4228 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
4229 skd_isr_fwstate(skdev);
4230 spin_unlock_irqrestore(&skdev->lock, flags);
4231 return IRQ_HANDLED;
4232}
4233
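/*
 * MSI-X completion-queue vector: ack the interrupt, process up to
 * skd_isr_comp_limit posted completions, then either run the request
 * function directly or hand the remaining work to the completion
 * worker.
 */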
4234static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
4235{
4236 struct skd_device *skdev = skd_host_data;
4237 unsigned long flags;
4238 int flush_enqueued = 0;
4239 int deferred;
4240
4241 spin_lock_irqsave(&skdev->lock, flags);
2e44b427 4242 pr_debug("%s:%s:%d MSIX = 0x%x\n",
4243 skdev->name, __func__, __LINE__,
4244 SKD_READL(skdev, FIT_INT_STATUS_HOST));
e67f86b3
AB
4245 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
4246 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
4247 &flush_enqueued);
4248
4249 if (flush_enqueued)
4250 skd_request_fn(skdev->queue);
4251
4252 if (deferred)
4253 schedule_work(&skdev->completion_worker);
4254 else if (!flush_enqueued)
4255 skd_request_fn(skdev->queue);
4256
4257 spin_unlock_irqrestore(&skdev->lock, flags);
4258
4259 return IRQ_HANDLED;
4260}
4261
4262static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
4263{
4264 struct skd_device *skdev = skd_host_data;
4265 unsigned long flags;
4266
4267 spin_lock_irqsave(&skdev->lock, flags);
2e44b427 4268 pr_debug("%s:%s:%d MSIX = 0x%x\n",
4269 skdev->name, __func__, __LINE__,
4270 SKD_READL(skdev, FIT_INT_STATUS_HOST));
e67f86b3
AB
4271 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
4272 skd_isr_msg_from_dev(skdev);
4273 spin_unlock_irqrestore(&skdev->lock, flags);
4274 return IRQ_HANDLED;
4275}
4276
4277static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
4278{
4279 struct skd_device *skdev = skd_host_data;
4280 unsigned long flags;
4281
4282 spin_lock_irqsave(&skdev->lock, flags);
2e44b427 4283 pr_debug("%s:%s:%d MSIX = 0x%x\n",
4284 skdev->name, __func__, __LINE__,
4285 SKD_READL(skdev, FIT_INT_STATUS_HOST));
e67f86b3
AB
4286 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
4287 spin_unlock_irqrestore(&skdev->lock, flags);
4288 return IRQ_HANDLED;
4289}
4290
4291/*
4292 *****************************************************************************
4293 * PCIe MSI/MSI-X SETUP
4294 *****************************************************************************
4295 */
4296
4297struct skd_msix_entry {
4298 int have_irq;
4299 u32 vector;
4300 u32 entry;
4301 struct skd_device *rsp;
4302 char isr_name[30];
4303};
4304
4305struct skd_init_msix_entry {
4306 const char *name;
4307 irq_handler_t handler;
4308};
4309
4310#define SKD_MAX_MSIX_COUNT 13
4311#define SKD_MIN_MSIX_COUNT 7
4312#define SKD_BASE_MSIX_IRQ 4
4313
4314static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
4315 { "(DMA 0)", skd_reserved_isr },
4316 { "(DMA 1)", skd_reserved_isr },
4317 { "(DMA 2)", skd_reserved_isr },
4318 { "(DMA 3)", skd_reserved_isr },
4319 { "(State Change)", skd_statec_isr },
4320 { "(COMPL_Q)", skd_comp_q },
4321 { "(MSG)", skd_msg_isr },
4322 { "(Reserved)", skd_reserved_isr },
4323 { "(Reserved)", skd_reserved_isr },
4324 { "(Queue Full 0)", skd_qfull_isr },
4325 { "(Queue Full 1)", skd_qfull_isr },
4326 { "(Queue Full 2)", skd_qfull_isr },
4327 { "(Queue Full 3)", skd_qfull_isr },
4328};
4329
4330static void skd_release_msix(struct skd_device *skdev)
4331{
4332 struct skd_msix_entry *qentry;
4333 int i;
4334
4335 if (skdev->msix_entries == NULL)
4336 return;
4337 for (i = 0; i < skdev->msix_count; i++) {
4338 qentry = &skdev->msix_entries[i];
4339 skdev = qentry->rsp;
4340
4341 if (qentry->have_irq)
4342 devm_free_irq(&skdev->pdev->dev,
4343 qentry->vector, qentry->rsp);
4344 }
4345 pci_disable_msix(skdev->pdev);
4346 kfree(skdev->msix_entries);
4347 skdev->msix_count = 0;
4348 skdev->msix_entries = NULL;
4349}
4350
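/*
 * Allocate and wire up the MSI-X vectors.  All SKD_MAX_MSIX_COUNT (13)
 * vectors are requested; if fewer are available the enable is retried
 * with the smaller count, as long as it is at least SKD_MIN_MSIX_COUNT.
 * Each vector is then bound to the handler named in msix_entries[].
 */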
4351static int skd_acquire_msix(struct skd_device *skdev)
4352{
4353 int i, rc;
4354 struct pci_dev *pdev;
4355 struct msix_entry *entries = NULL;
4356 struct skd_msix_entry *qentry;
4357
4358 pdev = skdev->pdev;
4359 skdev->msix_count = SKD_MAX_MSIX_COUNT;
4360 entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
4361 GFP_KERNEL);
4362 if (!entries)
4363 return -ENOMEM;
4364
4365 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
4366 entries[i].entry = i;
4367
4368 rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT);
4369 if (rc < 0)
4370 goto msix_out;
4371 if (rc) {
4372 if (rc < SKD_MIN_MSIX_COUNT) {
4373 pr_err("(%s): failed to enable MSI-X %d\n",
4374 skd_name(skdev), rc);
4375 goto msix_out;
4376 }
2e44b427 4377 pr_debug("%s:%s:%d %s: <%s> allocated %d MSI-X vectors\n",
4378 skdev->name, __func__, __LINE__,
4379 pci_name(pdev), skdev->name, rc);
e67f86b3
AB
4380
4381 skdev->msix_count = rc;
4382 rc = pci_enable_msix(pdev, entries, skdev->msix_count);
4383 if (rc) {
4384 pr_err("(%s): failed to enable MSI-X "
4385 "support (%d) %d\n",
4386 skd_name(skdev), skdev->msix_count, rc);
4387 goto msix_out;
4388 }
4389 }
4390 skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
4391 skdev->msix_count, GFP_KERNEL);
4392 if (!skdev->msix_entries) {
4393 rc = -ENOMEM;
4394 skdev->msix_count = 0;
4395 pr_err("(%s): msix table allocation error\n",
4396 skd_name(skdev));
4397 goto msix_out;
4398 }
4399
4400 qentry = skdev->msix_entries;
4401 for (i = 0; i < skdev->msix_count; i++) {
4402 qentry->vector = entries[i].vector;
4403 qentry->entry = entries[i].entry;
4404 qentry->rsp = NULL;
4405 qentry->have_irq = 0;
2e44b427 4406 pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
4407 skdev->name, __func__, __LINE__,
4408 pci_name(pdev), skdev->name,
4409 i, qentry->vector, qentry->entry);
e67f86b3
AB
4410 qentry++;
4411 }
4412
4413 /* Enable MSI-X vectors for the base queue */
4414 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
4415 qentry = &skdev->msix_entries[i];
4416 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
4417 "%s%d-msix %s", DRV_NAME, skdev->devno,
4418 msix_entries[i].name);
4419 rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
4420 msix_entries[i].handler, 0,
4421 qentry->isr_name, skdev);
4422 if (rc) {
4423 pr_err("(%s): Unable to register(%d) MSI-X "
4424 "handler %d: %s\n",
4425 skd_name(skdev), rc, i, qentry->isr_name);
4426 goto msix_out;
4427 } else {
4428 qentry->have_irq = 1;
4429 qentry->rsp = skdev;
4430 }
4431 }
2e44b427 4432 pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
4433 skdev->name, __func__, __LINE__,
4434 pci_name(pdev), skdev->name, skdev->msix_count);
e67f86b3
AB
4435 return 0;
4436
4437msix_out:
4438 if (entries)
4439 kfree(entries);
4440 skd_release_msix(skdev);
4441 return rc;
4442}
4443
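/*
 * Interrupt setup with graceful degradation: starting from the
 * configured skdev->irq_type, fall back from MSI-X to MSI to a shared
 * legacy INTx interrupt if enabling fails.  skdev->irq_type ends up
 * recording the mode actually in use so skd_release_irq() can undo it.
 */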
4444static int skd_acquire_irq(struct skd_device *skdev)
4445{
4446 int rc;
4447 struct pci_dev *pdev;
4448
4449 pdev = skdev->pdev;
4450 skdev->msix_count = 0;
4451
4452RETRY_IRQ_TYPE:
4453 switch (skdev->irq_type) {
4454 case SKD_IRQ_MSIX:
4455 rc = skd_acquire_msix(skdev);
4456 if (!rc)
4457 pr_info("(%s): MSI-X %d irqs enabled\n",
4458 skd_name(skdev), skdev->msix_count);
4459 else {
4460 pr_err(
4461 "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
4462 skd_name(skdev), rc);
4463 skdev->irq_type = SKD_IRQ_MSI;
4464 goto RETRY_IRQ_TYPE;
4465 }
4466 break;
4467 case SKD_IRQ_MSI:
4468 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
4469 DRV_NAME, skdev->devno);
4470 rc = pci_enable_msi(pdev);
4471 if (!rc) {
4472 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
4473 skdev->isr_name, skdev);
4474 if (rc) {
4475 pci_disable_msi(pdev);
4476 pr_err(
4477 "(%s): failed to allocate the MSI interrupt %d\n",
4478 skd_name(skdev), rc);
4479 goto RETRY_IRQ_LEGACY;
4480 }
4481 pr_info("(%s): MSI irq %d enabled\n",
4482 skd_name(skdev), pdev->irq);
4483 } else {
4484RETRY_IRQ_LEGACY:
4485 pr_err(
4486 "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
4487 skd_name(skdev), rc);
4488 skdev->irq_type = SKD_IRQ_LEGACY;
4489 goto RETRY_IRQ_TYPE;
4490 }
4491 break;
4492 case SKD_IRQ_LEGACY:
4493 snprintf(skdev->isr_name, sizeof(skdev->isr_name),
4494 "%s%d-legacy", DRV_NAME, skdev->devno);
4495 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
4496 IRQF_SHARED, skdev->isr_name, skdev);
4497 if (!rc)
4498 pr_info("(%s): LEGACY irq %d enabled\n",
4499 skd_name(skdev), pdev->irq);
4500 else
4501 pr_err("(%s): request LEGACY irq error %d\n",
4502 skd_name(skdev), rc);
4503 break;
4504 default:
4505 pr_info("(%s): irq_type %d invalid, re-set to %d\n",
4506 skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
4507 skdev->irq_type = SKD_IRQ_LEGACY;
4508 goto RETRY_IRQ_TYPE;
4509 }
4510 return rc;
4511}
4512
4513static void skd_release_irq(struct skd_device *skdev)
4514{
4515 switch (skdev->irq_type) {
4516 case SKD_IRQ_MSIX:
4517 skd_release_msix(skdev);
4518 break;
4519 case SKD_IRQ_MSI:
4520 devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4521 pci_disable_msi(skdev->pdev);
4522 break;
4523 case SKD_IRQ_LEGACY:
4524 devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
4525 break;
4526 default:
4527 pr_err("(%s): wrong irq type %d!",
4528 skd_name(skdev), skdev->irq_type);
4529 break;
4530 }
4531}
4532
4533/*
4534 *****************************************************************************
4535 * CONSTRUCT
4536 *****************************************************************************
4537 */
4538
4539static int skd_cons_skcomp(struct skd_device *skdev);
4540static int skd_cons_skmsg(struct skd_device *skdev);
4541static int skd_cons_skreq(struct skd_device *skdev);
4542static int skd_cons_skspcl(struct skd_device *skdev);
4543static int skd_cons_sksb(struct skd_device *skdev);
4544static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4545 u32 n_sg,
4546 dma_addr_t *ret_dma_addr);
4547static int skd_cons_disk(struct skd_device *skdev);
4548
4549#define SKD_N_DEV_TABLE 16u
4550static u32 skd_next_devno;
4551
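/*
 * Allocate all per-device state in dependency order: completion queue,
 * FIT message buffers, request contexts, special (pass-through)
 * contexts, the internal special buffer, and finally the gendisk and
 * request queue.  Any failure tears down whatever was built via
 * skd_destruct().
 */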
4552static struct skd_device *skd_construct(struct pci_dev *pdev)
4553{
4554 struct skd_device *skdev;
4555 int blk_major = skd_major;
4556 int rc;
4557
4558 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4559
4560 if (!skdev) {
4561 pr_err(PFX "(%s): memory alloc failure\n",
4562 pci_name(pdev));
4563 return NULL;
4564 }
4565
4566 skdev->state = SKD_DRVR_STATE_LOAD;
4567 skdev->pdev = pdev;
4568 skdev->devno = skd_next_devno++;
4569 skdev->major = blk_major;
4570 skdev->irq_type = skd_isr_type;
4571 sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
4572 skdev->dev_max_queue_depth = 0;
4573
4574 skdev->num_req_context = skd_max_queue_depth;
4575 skdev->num_fitmsg_context = skd_max_queue_depth;
4576 skdev->n_special = skd_max_pass_thru;
4577 skdev->cur_max_queue_depth = 1;
4578 skdev->queue_low_water_mark = 1;
4579 skdev->proto_ver = 99;
4580 skdev->sgs_per_request = skd_sgs_per_request;
4581 skdev->dbg_level = skd_dbg_level;
4582
4583 if (skd_bio)
4584 bio_list_init(&skdev->bio_queue);
4585
4586
4587 atomic_set(&skdev->device_count, 0);
4588
4589 spin_lock_init(&skdev->lock);
4590
4591 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4592 INIT_LIST_HEAD(&skdev->flush_list);
4593
2e44b427 4594 pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
4595 rc = skd_cons_skcomp(skdev);
4596 if (rc < 0)
4597 goto err_out;
4598
2e44b427 4599 pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
4600 rc = skd_cons_skmsg(skdev);
4601 if (rc < 0)
4602 goto err_out;
4603
2e44b427 4604 pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
4605 rc = skd_cons_skreq(skdev);
4606 if (rc < 0)
4607 goto err_out;
4608
2e44b427 4609 pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
4610 rc = skd_cons_skspcl(skdev);
4611 if (rc < 0)
4612 goto err_out;
4613
2e44b427 4614 pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
4615 rc = skd_cons_sksb(skdev);
4616 if (rc < 0)
4617 goto err_out;
4618
2e44b427 4619 pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
4620 rc = skd_cons_disk(skdev);
4621 if (rc < 0)
4622 goto err_out;
4623
2e44b427 4624 pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
4625 return skdev;
4626
4627err_out:
2e44b427 4628 pr_debug("%s:%s:%d construct failed\n",
4629 skdev->name, __func__, __LINE__);
e67f86b3
AB
4630 skd_destruct(skdev);
4631 return NULL;
4632}
4633
4634static int skd_cons_skcomp(struct skd_device *skdev)
4635{
4636 int rc = 0;
4637 struct fit_completion_entry_v1 *skcomp;
4638 u32 nbytes;
4639
4640 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
4641 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
4642
2e44b427 4643 pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
4644 skdev->name, __func__, __LINE__,
4645 nbytes, SKD_N_COMPLETION_ENTRY);
e67f86b3
AB
4646
4647 skcomp = pci_alloc_consistent(skdev->pdev, nbytes,
4648 &skdev->cq_dma_address);
4649
4650 if (skcomp == NULL) {
4651 rc = -ENOMEM;
4652 goto err_out;
4653 }
4654
4655 memset(skcomp, 0, nbytes);
4656
4657 skdev->skcomp_table = skcomp;
4658 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
4659 sizeof(*skcomp) *
4660 SKD_N_COMPLETION_ENTRY);
4661
4662err_out:
4663 return rc;
4664}
4665
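/*
 * Allocate the FIT message contexts.  Each DMA message buffer is
 * over-allocated by 64 bytes and then rounded up to the alignment
 * implied by FIT_QCMD_BASE_ADDRESS_MASK; the discarded low bits are
 * kept in skmsg->offset so the original pointer and DMA address can be
 * reconstructed when the buffer is freed.
 */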
4666static int skd_cons_skmsg(struct skd_device *skdev)
4667{
4668 int rc = 0;
4669 u32 i;
4670
2e44b427 4671 pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
4672 skdev->name, __func__, __LINE__,
4673 sizeof(struct skd_fitmsg_context),
4674 skdev->num_fitmsg_context,
4675 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
e67f86b3
AB
4676
4677 skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
4678 *skdev->num_fitmsg_context, GFP_KERNEL);
4679 if (skdev->skmsg_table == NULL) {
4680 rc = -ENOMEM;
4681 goto err_out;
4682 }
4683
4684 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4685 struct skd_fitmsg_context *skmsg;
4686
4687 skmsg = &skdev->skmsg_table[i];
4688
4689 skmsg->id = i + SKD_ID_FIT_MSG;
4690
4691 skmsg->state = SKD_MSG_STATE_IDLE;
4692 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
4693 SKD_N_FITMSG_BYTES + 64,
4694 &skmsg->mb_dma_address);
4695
4696 if (skmsg->msg_buf == NULL) {
4697 rc = -ENOMEM;
4698 goto err_out;
4699 }
4700
4701 skmsg->offset = (u32)((u64)skmsg->msg_buf &
4702 (~FIT_QCMD_BASE_ADDRESS_MASK));
4703 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
4704 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
4705 FIT_QCMD_BASE_ADDRESS_MASK);
4706 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
4707 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
4708 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
4709
4710 skmsg->next = &skmsg[1];
4711 }
4712
4713 /* Free list is in order starting with the 0th entry. */
4714 skdev->skmsg_table[i - 1].next = NULL;
4715 skdev->skmsg_free_list = skdev->skmsg_table;
4716
4717err_out:
4718 return rc;
4719}
4720
4721static int skd_cons_skreq(struct skd_device *skdev)
4722{
4723 int rc = 0;
4724 u32 i;
4725
2e44b427 4726 pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
4727 skdev->name, __func__, __LINE__,
4728 sizeof(struct skd_request_context),
4729 skdev->num_req_context,
4730 sizeof(struct skd_request_context) * skdev->num_req_context);
e67f86b3
AB
4731
4732 skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
4733 * skdev->num_req_context, GFP_KERNEL);
4734 if (skdev->skreq_table == NULL) {
4735 rc = -ENOMEM;
4736 goto err_out;
4737 }
4738
2e44b427 4739 pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
4740 skdev->name, __func__, __LINE__,
4741 skdev->sgs_per_request, sizeof(struct scatterlist),
4742 skdev->sgs_per_request * sizeof(struct scatterlist));
e67f86b3
AB
4743
4744 for (i = 0; i < skdev->num_req_context; i++) {
4745 struct skd_request_context *skreq;
4746
4747 skreq = &skdev->skreq_table[i];
4748
4749 skreq->id = i + SKD_ID_RW_REQUEST;
4750 skreq->state = SKD_REQ_STATE_IDLE;
4751
4752 skreq->sg = kzalloc(sizeof(struct scatterlist) *
4753 skdev->sgs_per_request, GFP_KERNEL);
4754 if (skreq->sg == NULL) {
4755 rc = -ENOMEM;
4756 goto err_out;
4757 }
4758 sg_init_table(skreq->sg, skdev->sgs_per_request);
4759
4760 skreq->sksg_list = skd_cons_sg_list(skdev,
4761 skdev->sgs_per_request,
4762 &skreq->sksg_dma_address);
4763
4764 if (skreq->sksg_list == NULL) {
4765 rc = -ENOMEM;
4766 goto err_out;
4767 }
4768
4769 skreq->next = &skreq[1];
4770 }
4771
4772 /* Free list is in order starting with the 0th entry. */
4773 skdev->skreq_table[i - 1].next = NULL;
4774 skdev->skreq_free_list = skdev->skreq_table;
4775
4776err_out:
4777 return rc;
4778}
4779
4780static int skd_cons_skspcl(struct skd_device *skdev)
4781{
4782 int rc = 0;
4783 u32 i, nbytes;
4784
2e44b427 4785 pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4786 skdev->name, __func__, __LINE__,
4787 sizeof(struct skd_special_context),
4788 skdev->n_special,
4789 sizeof(struct skd_special_context) * skdev->n_special);
e67f86b3
AB
4790
4791 skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4792 * skdev->n_special, GFP_KERNEL);
4793 if (skdev->skspcl_table == NULL) {
4794 rc = -ENOMEM;
4795 goto err_out;
4796 }
4797
4798 for (i = 0; i < skdev->n_special; i++) {
4799 struct skd_special_context *skspcl;
4800
4801 skspcl = &skdev->skspcl_table[i];
4802
4803 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4804 skspcl->req.state = SKD_REQ_STATE_IDLE;
4805
4806 skspcl->req.next = &skspcl[1].req;
4807
4808 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4809
4810 skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4811 &skspcl->mb_dma_address);
4812 if (skspcl->msg_buf == NULL) {
4813 rc = -ENOMEM;
4814 goto err_out;
4815 }
4816
4817 memset(skspcl->msg_buf, 0, nbytes);
4818
4819 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4820 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4821 if (skspcl->req.sg == NULL) {
4822 rc = -ENOMEM;
4823 goto err_out;
4824 }
4825
4826 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4827 SKD_N_SG_PER_SPECIAL,
4828 &skspcl->req.
4829 sksg_dma_address);
4830 if (skspcl->req.sksg_list == NULL) {
4831 rc = -ENOMEM;
4832 goto err_out;
4833 }
4834 }
4835
4836 /* Free list is in order starting with the 0th entry. */
4837 skdev->skspcl_table[i - 1].req.next = NULL;
4838 skdev->skspcl_free_list = skdev->skspcl_table;
4839
4840 return rc;
4841
4842err_out:
4843 return rc;
4844}
4845
4846static int skd_cons_sksb(struct skd_device *skdev)
4847{
4848 int rc = 0;
4849 struct skd_special_context *skspcl;
4850 u32 nbytes;
4851
4852 skspcl = &skdev->internal_skspcl;
4853
4854 skspcl->req.id = 0 + SKD_ID_INTERNAL;
4855 skspcl->req.state = SKD_REQ_STATE_IDLE;
4856
4857 nbytes = SKD_N_INTERNAL_BYTES;
4858
4859 skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4860 &skspcl->db_dma_address);
4861 if (skspcl->data_buf == NULL) {
4862 rc = -ENOMEM;
4863 goto err_out;
4864 }
4865
4866 memset(skspcl->data_buf, 0, nbytes);
4867
4868 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4869 skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
4870 &skspcl->mb_dma_address);
4871 if (skspcl->msg_buf == NULL) {
4872 rc = -ENOMEM;
4873 goto err_out;
4874 }
4875
4876 memset(skspcl->msg_buf, 0, nbytes);
4877
4878 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4879 &skspcl->req.sksg_dma_address);
4880 if (skspcl->req.sksg_list == NULL) {
4881 rc = -ENOMEM;
4882 goto err_out;
4883 }
4884
4885 if (!skd_format_internal_skspcl(skdev)) {
4886 rc = -EINVAL;
4887 goto err_out;
4888 }
4889
4890err_out:
4891 return rc;
4892}
4893
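/*
 * Build a DMA-coherent array of FIT scatter-gather descriptors where
 * each entry's next_desc_ptr holds the bus address of the following
 * entry, forming a chain the hardware can walk; the final entry is
 * terminated with a zero next_desc_ptr.
 */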
4894static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
4895 u32 n_sg,
4896 dma_addr_t *ret_dma_addr)
4897{
4898 struct fit_sg_descriptor *sg_list;
4899 u32 nbytes;
4900
4901 nbytes = sizeof(*sg_list) * n_sg;
4902
4903 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
4904
4905 if (sg_list != NULL) {
4906 uint64_t dma_address = *ret_dma_addr;
4907 u32 i;
4908
4909 memset(sg_list, 0, nbytes);
4910
4911 for (i = 0; i < n_sg - 1; i++) {
4912 uint64_t ndp_off;
4913 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
4914
4915 sg_list[i].next_desc_ptr = dma_address + ndp_off;
4916 }
4917 sg_list[i].next_desc_ptr = 0LL;
4918 }
4919
4920 return sg_list;
4921}
4922
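/*
 * Create the gendisk and request queue.  In request-fn mode the queue
 * is driven by skd_request_fn(); in bio mode (skd_bio) the driver
 * installs skd_make_request() instead.  The queue advertises FLUSH/FUA,
 * DISCARD and non-rotational support and is left stopped until the
 * device reports that it is online.
 */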
4923static int skd_cons_disk(struct skd_device *skdev)
4924{
4925 int rc = 0;
4926 struct gendisk *disk;
4927 struct request_queue *q;
4928 unsigned long flags;
4929
4930 disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4931 if (!disk) {
4932 rc = -ENOMEM;
4933 goto err_out;
4934 }
4935
4936 skdev->disk = disk;
4937 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4938
4939 disk->major = skdev->major;
4940 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4941 disk->fops = &skd_blockdev_ops;
4942 disk->private_data = skdev;
4943
4944 if (!skd_bio) {
4945 q = blk_init_queue(skd_request_fn, &skdev->lock);
4946 } else {
4947 q = blk_alloc_queue(GFP_KERNEL);
4948 q->queue_flags = QUEUE_FLAG_IO_STAT | QUEUE_FLAG_STACKABLE;
4949 }
4950
4951 if (!q) {
4952 rc = -ENOMEM;
4953 goto err_out;
4954 }
4955
4956 skdev->queue = q;
4957 disk->queue = q;
4958 q->queuedata = skdev;
4959
4960 if (skd_bio) {
4961 q->queue_lock = &skdev->lock;
4962 blk_queue_make_request(q, skd_make_request);
4963 }
4964
4965 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
4966 blk_queue_max_segments(q, skdev->sgs_per_request);
4967 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4968
 4969	/* set sysfs optimal_io_size to 8K */
4970 blk_queue_io_opt(q, 8192);
4971
4972 /* DISCARD Flag initialization. */
4973 q->limits.discard_granularity = 8192;
4974 q->limits.discard_alignment = 0;
4975 q->limits.max_discard_sectors = UINT_MAX >> 9;
4976 q->limits.discard_zeroes_data = 1;
4977 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
4978 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4979
4980 spin_lock_irqsave(&skdev->lock, flags);
2e44b427 4981 pr_debug("%s:%s:%d stopping %s queue\n",
4982 skdev->name, __func__, __LINE__, skdev->name);
e67f86b3
AB
4983 skd_stop_queue(skdev);
4984 spin_unlock_irqrestore(&skdev->lock, flags);
4985
4986err_out:
4987 return rc;
4988}
4989
4990/*
4991 *****************************************************************************
4992 * DESTRUCT (FREE)
4993 *****************************************************************************
4994 */
4995
4996static void skd_free_skcomp(struct skd_device *skdev);
4997static void skd_free_skmsg(struct skd_device *skdev);
4998static void skd_free_skreq(struct skd_device *skdev);
4999static void skd_free_skspcl(struct skd_device *skdev);
5000static void skd_free_sksb(struct skd_device *skdev);
5001static void skd_free_sg_list(struct skd_device *skdev,
5002 struct fit_sg_descriptor *sg_list,
5003 u32 n_sg, dma_addr_t dma_addr);
5004static void skd_free_disk(struct skd_device *skdev);
5005
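/*
 * Free everything skd_construct() built, in the reverse order of
 * construction.  Each helper tolerates a partially constructed device,
 * so this is also used on the construct error path.
 */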
5006static void skd_destruct(struct skd_device *skdev)
5007{
5008 if (skdev == NULL)
5009 return;
5010
5011
2e44b427 5012 pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
5013 skd_free_disk(skdev);
5014
2e44b427 5015 pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
5016 skd_free_sksb(skdev);
5017
2e44b427 5018 pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
5019 skd_free_skspcl(skdev);
5020
2e44b427 5021 pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
5022 skd_free_skreq(skdev);
5023
2e44b427 5024 pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
5025 skd_free_skmsg(skdev);
5026
2e44b427 5027 pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
5028 skd_free_skcomp(skdev);
5029
2e44b427 5030 pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
5031 kfree(skdev);
5032}
5033
5034static void skd_free_skcomp(struct skd_device *skdev)
5035{
5036 if (skdev->skcomp_table != NULL) {
5037 u32 nbytes;
5038
5039 nbytes = sizeof(skdev->skcomp_table[0]) *
5040 SKD_N_COMPLETION_ENTRY;
5041 pci_free_consistent(skdev->pdev, nbytes,
5042 skdev->skcomp_table, skdev->cq_dma_address);
5043 }
5044
5045 skdev->skcomp_table = NULL;
5046 skdev->cq_dma_address = 0;
5047}
5048
5049static void skd_free_skmsg(struct skd_device *skdev)
5050{
5051 u32 i;
5052
5053 if (skdev->skmsg_table == NULL)
5054 return;
5055
5056 for (i = 0; i < skdev->num_fitmsg_context; i++) {
5057 struct skd_fitmsg_context *skmsg;
5058
5059 skmsg = &skdev->skmsg_table[i];
5060
5061 if (skmsg->msg_buf != NULL) {
5062 skmsg->msg_buf += skmsg->offset;
5063 skmsg->mb_dma_address += skmsg->offset;
5064 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
5065 skmsg->msg_buf,
5066 skmsg->mb_dma_address);
5067 }
5068 skmsg->msg_buf = NULL;
5069 skmsg->mb_dma_address = 0;
5070 }
5071
5072 kfree(skdev->skmsg_table);
5073 skdev->skmsg_table = NULL;
5074}
5075
5076static void skd_free_skreq(struct skd_device *skdev)
5077{
5078 u32 i;
5079
5080 if (skdev->skreq_table == NULL)
5081 return;
5082
5083 for (i = 0; i < skdev->num_req_context; i++) {
5084 struct skd_request_context *skreq;
5085
5086 skreq = &skdev->skreq_table[i];
5087
5088 skd_free_sg_list(skdev, skreq->sksg_list,
5089 skdev->sgs_per_request,
5090 skreq->sksg_dma_address);
5091
5092 skreq->sksg_list = NULL;
5093 skreq->sksg_dma_address = 0;
5094
5095 kfree(skreq->sg);
5096 }
5097
5098 kfree(skdev->skreq_table);
5099 skdev->skreq_table = NULL;
5100}
5101
5102static void skd_free_skspcl(struct skd_device *skdev)
5103{
5104 u32 i;
5105 u32 nbytes;
5106
5107 if (skdev->skspcl_table == NULL)
5108 return;
5109
5110 for (i = 0; i < skdev->n_special; i++) {
5111 struct skd_special_context *skspcl;
5112
5113 skspcl = &skdev->skspcl_table[i];
5114
5115 if (skspcl->msg_buf != NULL) {
5116 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
5117 pci_free_consistent(skdev->pdev, nbytes,
5118 skspcl->msg_buf,
5119 skspcl->mb_dma_address);
5120 }
5121
5122 skspcl->msg_buf = NULL;
5123 skspcl->mb_dma_address = 0;
5124
5125 skd_free_sg_list(skdev, skspcl->req.sksg_list,
5126 SKD_N_SG_PER_SPECIAL,
5127 skspcl->req.sksg_dma_address);
5128
5129 skspcl->req.sksg_list = NULL;
5130 skspcl->req.sksg_dma_address = 0;
5131
5132 kfree(skspcl->req.sg);
5133 }
5134
5135 kfree(skdev->skspcl_table);
5136 skdev->skspcl_table = NULL;
5137}
5138
5139static void skd_free_sksb(struct skd_device *skdev)
5140{
5141 struct skd_special_context *skspcl;
5142 u32 nbytes;
5143
5144 skspcl = &skdev->internal_skspcl;
5145
5146 if (skspcl->data_buf != NULL) {
5147 nbytes = SKD_N_INTERNAL_BYTES;
5148
5149 pci_free_consistent(skdev->pdev, nbytes,
5150 skspcl->data_buf, skspcl->db_dma_address);
5151 }
5152
5153 skspcl->data_buf = NULL;
5154 skspcl->db_dma_address = 0;
5155
5156 if (skspcl->msg_buf != NULL) {
5157 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
5158 pci_free_consistent(skdev->pdev, nbytes,
5159 skspcl->msg_buf, skspcl->mb_dma_address);
5160 }
5161
5162 skspcl->msg_buf = NULL;
5163 skspcl->mb_dma_address = 0;
5164
5165 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
5166 skspcl->req.sksg_dma_address);
5167
5168 skspcl->req.sksg_list = NULL;
5169 skspcl->req.sksg_dma_address = 0;
5170}
5171
5172static void skd_free_sg_list(struct skd_device *skdev,
5173 struct fit_sg_descriptor *sg_list,
5174 u32 n_sg, dma_addr_t dma_addr)
5175{
5176 if (sg_list != NULL) {
5177 u32 nbytes;
5178
5179 nbytes = sizeof(*sg_list) * n_sg;
5180
5181 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
5182 }
5183}
5184
5185static void skd_free_disk(struct skd_device *skdev)
5186{
5187 struct gendisk *disk = skdev->disk;
5188
5189 if (disk != NULL) {
5190 struct request_queue *q = disk->queue;
5191
5192 if (disk->flags & GENHD_FL_UP)
5193 del_gendisk(disk);
5194 if (q)
5195 blk_cleanup_queue(q);
5196 put_disk(disk);
5197 }
5198 skdev->disk = NULL;
5199}
5200
5201
5202
5203/*
5204 *****************************************************************************
5205 * BLOCK DEVICE (BDEV) GLUE
5206 *****************************************************************************
5207 */
5208
5209static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
5210{
5211 struct skd_device *skdev;
5212 u64 capacity;
5213
5214 skdev = bdev->bd_disk->private_data;
5215
2e44b427 5216 pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
5217 skdev->name, __func__, __LINE__,
5218 bdev->bd_disk->disk_name, current->comm);
e67f86b3
AB
5219
5220 if (skdev->read_cap_is_valid) {
5221 capacity = get_capacity(skdev->disk);
5222 geo->heads = 64;
5223 geo->sectors = 255;
5224 geo->cylinders = (capacity) / (255 * 64);
5225
5226 return 0;
5227 }
5228 return -EIO;
5229}
5230
5231static int skd_bdev_attach(struct skd_device *skdev)
5232{
2e44b427 5233 pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
e67f86b3
AB
5234 add_disk(skdev->disk);
5235 return 0;
5236}
5237
5238static const struct block_device_operations skd_blockdev_ops = {
5239 .owner = THIS_MODULE,
5240 .ioctl = skd_bdev_ioctl,
5241 .getgeo = skd_bdev_getgeo,
5242};
5243
5244
5245/*
5246 *****************************************************************************
5247 * PCIe DRIVER GLUE
5248 *****************************************************************************
5249 */
5250
5251static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = {
5252 { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
5253 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
5254 { 0 } /* terminate list */
5255};
5256
5257MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
5258
5259static char *skd_pci_info(struct skd_device *skdev, char *str)
5260{
5261 int pcie_reg;
5262
5263 strcpy(str, "PCIe (");
5264 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
5265
5266 if (pcie_reg) {
5267
5268 char lwstr[6];
5269 uint16_t pcie_lstat, lspeed, lwidth;
5270
5271 pcie_reg += 0x12;
5272 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
5273 lspeed = pcie_lstat & (0xF);
5274 lwidth = (pcie_lstat & 0x3F0) >> 4;
5275
5276 if (lspeed == 1)
5277 strcat(str, "2.5GT/s ");
5278 else if (lspeed == 2)
5279 strcat(str, "5.0GT/s ");
5280 else
5281 strcat(str, "<unknown> ");
5282 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
5283 strcat(str, lwstr);
5284 }
5285 return str;
5286}
5287
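/*
 * PCI probe: enable the device, set a 64-bit DMA mask (falling back to
 * 32-bit), build the skd_device, map the BARs, acquire interrupts,
 * start the timer and the device, then wait up to SKD_START_WAIT_SECONDS
 * for the drive to come online before attaching the block device.
 */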
5288static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5289{
5290 int i;
5291 int rc = 0;
5292 char pci_str[32];
5293 struct skd_device *skdev;
5294
5295 pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
5296 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
5297 pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
5298 pci_name(pdev), pdev->vendor, pdev->device);
5299
5300 rc = pci_enable_device(pdev);
5301 if (rc)
5302 return rc;
5303 rc = pci_request_regions(pdev, DRV_NAME);
5304 if (rc)
5305 goto err_out;
5306 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5307 if (!rc) {
5308 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
5309
5310 pr_err("(%s): consistent DMA mask error %d\n",
5311 pci_name(pdev), rc);
5312 }
5313 } else {
 5314		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5315 if (rc) {
5316
5317 pr_err("(%s): DMA mask error %d\n",
5318 pci_name(pdev), rc);
5319 goto err_out_regions;
5320 }
5321 }
5322
5323 skdev = skd_construct(pdev);
1762b57f
WY
5324 if (skdev == NULL) {
5325 rc = -ENOMEM;
e67f86b3 5326 goto err_out_regions;
1762b57f 5327 }
e67f86b3
AB
5328
5329 skd_pci_info(skdev, pci_str);
5330 pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
5331
5332 pci_set_master(pdev);
5333 rc = pci_enable_pcie_error_reporting(pdev);
5334 if (rc) {
5335 pr_err(
5336 "(%s): bad enable of PCIe error reporting rc=%d\n",
5337 skd_name(skdev), rc);
5338 skdev->pcie_error_reporting_is_enabled = 0;
5339 } else
5340 skdev->pcie_error_reporting_is_enabled = 1;
5341
5342
5343 pci_set_drvdata(pdev, skdev);
5344 skdev->pdev = pdev;
5345 skdev->disk->driverfs_dev = &pdev->dev;
5346
5347 for (i = 0; i < SKD_MAX_BARS; i++) {
5348 skdev->mem_phys[i] = pci_resource_start(pdev, i);
5349 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
5350 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
5351 skdev->mem_size[i]);
5352 if (!skdev->mem_map[i]) {
5353 pr_err("(%s): Unable to map adapter memory!\n",
5354 skd_name(skdev));
5355 rc = -ENODEV;
5356 goto err_out_iounmap;
5357 }
2e44b427 5358 pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
5359 skdev->name, __func__, __LINE__,
5360 skdev->mem_map[i],
5361 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
e67f86b3
AB
5362 }
5363
5364 rc = skd_acquire_irq(skdev);
5365 if (rc) {
5366 pr_err("(%s): interrupt resource error %d\n",
5367 skd_name(skdev), rc);
5368 goto err_out_iounmap;
5369 }
5370
5371 rc = skd_start_timer(skdev);
5372 if (rc)
5373 goto err_out_timer;
5374
5375 init_waitqueue_head(&skdev->waitq);
5376
5377 skd_start_device(skdev);
5378
5379 rc = wait_event_interruptible_timeout(skdev->waitq,
5380 (skdev->gendisk_on),
5381 (SKD_START_WAIT_SECONDS * HZ));
5382 if (skdev->gendisk_on > 0) {
5383 /* device came on-line after reset */
5384 skd_bdev_attach(skdev);
5385 rc = 0;
5386 } else {
 5387		/* we timed out, something is wrong with the device,
 5388		 * don't add the disk structure */
5389 pr_err(
5390 "(%s): error: waiting for s1120 timed out %d!\n",
5391 skd_name(skdev), rc);
 5392		/* in case of no error, we time out with ENXIO */
5393 if (!rc)
5394 rc = -ENXIO;
5395 goto err_out_timer;
5396 }
5397
5398
5399#ifdef SKD_VMK_POLL_HANDLER
5400 if (skdev->irq_type == SKD_IRQ_MSIX) {
5401 /* MSIX completion handler is being used for coredump */
5402 vmklnx_scsi_register_poll_handler(skdev->scsi_host,
5403 skdev->msix_entries[5].vector,
5404 skd_comp_q, skdev);
5405 } else {
5406 vmklnx_scsi_register_poll_handler(skdev->scsi_host,
5407 skdev->pdev->irq, skd_isr,
5408 skdev);
5409 }
5410#endif /* SKD_VMK_POLL_HANDLER */
5411
5412 return rc;
5413
5414err_out_timer:
5415 skd_stop_device(skdev);
5416 skd_release_irq(skdev);
5417
5418err_out_iounmap:
5419 for (i = 0; i < SKD_MAX_BARS; i++)
5420 if (skdev->mem_map[i])
5421 iounmap(skdev->mem_map[i]);
5422
5423 if (skdev->pcie_error_reporting_is_enabled)
5424 pci_disable_pcie_error_reporting(pdev);
5425
5426 skd_destruct(skdev);
5427
5428err_out_regions:
5429 pci_release_regions(pdev);
5430
5431err_out:
5432 pci_disable_device(pdev);
5433 pci_set_drvdata(pdev, NULL);
5434 return rc;
5435}
5436
5437static void skd_pci_remove(struct pci_dev *pdev)
5438{
5439 int i;
5440 struct skd_device *skdev;
5441
5442 skdev = pci_get_drvdata(pdev);
5443 if (!skdev) {
5444 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5445 return;
5446 }
5447 skd_stop_device(skdev);
5448 skd_release_irq(skdev);
5449
5450 for (i = 0; i < SKD_MAX_BARS; i++)
5451 if (skdev->mem_map[i])
5452 iounmap((u32 *)skdev->mem_map[i]);
5453
5454 if (skdev->pcie_error_reporting_is_enabled)
5455 pci_disable_pcie_error_reporting(pdev);
5456
5457 skd_destruct(skdev);
5458
5459 pci_release_regions(pdev);
5460 pci_disable_device(pdev);
5461 pci_set_drvdata(pdev, NULL);
5462
5463 return;
5464}
5465
5466static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
5467{
5468 int i;
5469 struct skd_device *skdev;
5470
5471 skdev = pci_get_drvdata(pdev);
5472 if (!skdev) {
5473 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5474 return -EIO;
5475 }
5476
5477 skd_stop_device(skdev);
5478
5479 skd_release_irq(skdev);
5480
5481 for (i = 0; i < SKD_MAX_BARS; i++)
5482 if (skdev->mem_map[i])
5483 iounmap((u32 *)skdev->mem_map[i]);
5484
5485 if (skdev->pcie_error_reporting_is_enabled)
5486 pci_disable_pcie_error_reporting(pdev);
5487
5488 pci_release_regions(pdev);
5489 pci_save_state(pdev);
5490 pci_disable_device(pdev);
5491 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5492 return 0;
5493}
5494
5495static int skd_pci_resume(struct pci_dev *pdev)
5496{
5497 int i;
5498 int rc = 0;
5499 struct skd_device *skdev;
5500
5501 skdev = pci_get_drvdata(pdev);
5502 if (!skdev) {
5503 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5504 return -1;
5505 }
5506
5507 pci_set_power_state(pdev, PCI_D0);
5508 pci_enable_wake(pdev, PCI_D0, 0);
5509 pci_restore_state(pdev);
5510
5511 rc = pci_enable_device(pdev);
5512 if (rc)
5513 return rc;
5514 rc = pci_request_regions(pdev, DRV_NAME);
5515 if (rc)
5516 goto err_out;
5517 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5518 if (!rc) {
5519 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
5520
5521 pr_err("(%s): consistent DMA mask error %d\n",
5522 pci_name(pdev), rc);
5523 }
5524 } else {
5525 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5526 if (rc) {
5527
5528 pr_err("(%s): DMA mask error %d\n",
5529 pci_name(pdev), rc);
5530 goto err_out_regions;
5531 }
5532 }
5533
5534 pci_set_master(pdev);
5535 rc = pci_enable_pcie_error_reporting(pdev);
5536 if (rc) {
5537 pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
5538 skdev->name, rc);
5539 skdev->pcie_error_reporting_is_enabled = 0;
5540 } else
5541 skdev->pcie_error_reporting_is_enabled = 1;
5542
5543 for (i = 0; i < SKD_MAX_BARS; i++) {
5544
5545 skdev->mem_phys[i] = pci_resource_start(pdev, i);
5546 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
5547 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
5548 skdev->mem_size[i]);
5549 if (!skdev->mem_map[i]) {
5550 pr_err("(%s): Unable to map adapter memory!\n",
5551 skd_name(skdev));
5552 rc = -ENODEV;
5553 goto err_out_iounmap;
5554 }
2e44b427 5555 pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
5556 skdev->name, __func__, __LINE__,
5557 skdev->mem_map[i],
5558 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
e67f86b3
AB
5559 }
5560 rc = skd_acquire_irq(skdev);
5561 if (rc) {
5562
5563 pr_err("(%s): interrupt resource error %d\n",
5564 pci_name(pdev), rc);
5565 goto err_out_iounmap;
5566 }
5567
5568 rc = skd_start_timer(skdev);
5569 if (rc)
5570 goto err_out_timer;
5571
5572 init_waitqueue_head(&skdev->waitq);
5573
5574 skd_start_device(skdev);
5575
5576 return rc;
5577
5578err_out_timer:
5579 skd_stop_device(skdev);
5580 skd_release_irq(skdev);
5581
5582err_out_iounmap:
5583 for (i = 0; i < SKD_MAX_BARS; i++)
5584 if (skdev->mem_map[i])
5585 iounmap(skdev->mem_map[i]);
5586
5587 if (skdev->pcie_error_reporting_is_enabled)
5588 pci_disable_pcie_error_reporting(pdev);
5589
5590err_out_regions:
5591 pci_release_regions(pdev);
5592
5593err_out:
5594 pci_disable_device(pdev);
5595 return rc;
5596}
5597
5598static void skd_pci_shutdown(struct pci_dev *pdev)
5599{
5600 struct skd_device *skdev;
5601
5602 pr_err("skd_pci_shutdown called\n");
5603
5604 skdev = pci_get_drvdata(pdev);
5605 if (!skdev) {
5606 pr_err("%s: no device data for PCI\n", pci_name(pdev));
5607 return;
5608 }
5609
5610 pr_err("%s: calling stop\n", skd_name(skdev));
5611 skd_stop_device(skdev);
5612}
5613
5614static struct pci_driver skd_driver = {
5615 .name = DRV_NAME,
5616 .id_table = skd_pci_tbl,
5617 .probe = skd_pci_probe,
5618 .remove = skd_pci_remove,
5619 .suspend = skd_pci_suspend,
5620 .resume = skd_pci_resume,
5621 .shutdown = skd_pci_shutdown,
5622};
5623
5624/*
5625 *****************************************************************************
5626 * LOGGING SUPPORT
5627 *****************************************************************************
5628 */
5629
5630static const char *skd_name(struct skd_device *skdev)
5631{
5632 memset(skdev->id_str, 0, sizeof(skdev->id_str));
5633
5634 if (skdev->inquiry_is_valid)
5635 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
5636 skdev->name, skdev->inq_serial_num,
5637 pci_name(skdev->pdev));
5638 else
5639 snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
5640 skdev->name, pci_name(skdev->pdev));
5641
5642 return skdev->id_str;
5643}
5644
5645const char *skd_drive_state_to_str(int state)
5646{
5647 switch (state) {
5648 case FIT_SR_DRIVE_OFFLINE:
5649 return "OFFLINE";
5650 case FIT_SR_DRIVE_INIT:
5651 return "INIT";
5652 case FIT_SR_DRIVE_ONLINE:
5653 return "ONLINE";
5654 case FIT_SR_DRIVE_BUSY:
5655 return "BUSY";
5656 case FIT_SR_DRIVE_FAULT:
5657 return "FAULT";
5658 case FIT_SR_DRIVE_DEGRADED:
5659 return "DEGRADED";
5660 case FIT_SR_PCIE_LINK_DOWN:
 5661		return "LINK_DOWN";
5662 case FIT_SR_DRIVE_SOFT_RESET:
5663 return "SOFT_RESET";
5664 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
5665 return "NEED_FW";
5666 case FIT_SR_DRIVE_INIT_FAULT:
5667 return "INIT_FAULT";
5668 case FIT_SR_DRIVE_BUSY_SANITIZE:
5669 return "BUSY_SANITIZE";
5670 case FIT_SR_DRIVE_BUSY_ERASE:
5671 return "BUSY_ERASE";
5672 case FIT_SR_DRIVE_FW_BOOTING:
5673 return "FW_BOOTING";
5674 default:
5675 return "???";
5676 }
5677}
5678
5679const char *skd_skdev_state_to_str(enum skd_drvr_state state)
5680{
5681 switch (state) {
5682 case SKD_DRVR_STATE_LOAD:
5683 return "LOAD";
5684 case SKD_DRVR_STATE_IDLE:
5685 return "IDLE";
5686 case SKD_DRVR_STATE_BUSY:
5687 return "BUSY";
5688 case SKD_DRVR_STATE_STARTING:
5689 return "STARTING";
5690 case SKD_DRVR_STATE_ONLINE:
5691 return "ONLINE";
5692 case SKD_DRVR_STATE_PAUSING:
5693 return "PAUSING";
5694 case SKD_DRVR_STATE_PAUSED:
5695 return "PAUSED";
5696 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
5697 return "DRAINING_TIMEOUT";
5698 case SKD_DRVR_STATE_RESTARTING:
5699 return "RESTARTING";
5700 case SKD_DRVR_STATE_RESUMING:
5701 return "RESUMING";
5702 case SKD_DRVR_STATE_STOPPING:
5703 return "STOPPING";
5704 case SKD_DRVR_STATE_SYNCING:
5705 return "SYNCING";
5706 case SKD_DRVR_STATE_FAULT:
5707 return "FAULT";
5708 case SKD_DRVR_STATE_DISAPPEARED:
5709 return "DISAPPEARED";
5710 case SKD_DRVR_STATE_BUSY_ERASE:
5711 return "BUSY_ERASE";
5712 case SKD_DRVR_STATE_BUSY_SANITIZE:
5713 return "BUSY_SANITIZE";
5714 case SKD_DRVR_STATE_BUSY_IMMINENT:
5715 return "BUSY_IMMINENT";
5716 case SKD_DRVR_STATE_WAIT_BOOT:
5717 return "WAIT_BOOT";
5718
5719 default:
5720 return "???";
5721 }
5722}
5723
5724const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
5725{
5726 switch (state) {
5727 case SKD_MSG_STATE_IDLE:
5728 return "IDLE";
5729 case SKD_MSG_STATE_BUSY:
5730 return "BUSY";
5731 default:
5732 return "???";
5733 }
5734}
5735
5736const char *skd_skreq_state_to_str(enum skd_req_state state)
5737{
5738 switch (state) {
5739 case SKD_REQ_STATE_IDLE:
5740 return "IDLE";
5741 case SKD_REQ_STATE_SETUP:
5742 return "SETUP";
5743 case SKD_REQ_STATE_BUSY:
5744 return "BUSY";
5745 case SKD_REQ_STATE_COMPLETED:
5746 return "COMPLETED";
5747 case SKD_REQ_STATE_TIMEOUT:
5748 return "TIMEOUT";
5749 case SKD_REQ_STATE_ABORTED:
5750 return "ABORTED";
5751 default:
5752 return "???";
5753 }
5754}
5755
5756static void skd_log_skdev(struct skd_device *skdev, const char *event)
5757{
2e44b427 5758 pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
5759 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
5760 pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
5761 skdev->name, __func__, __LINE__,
5762 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
5763 skd_skdev_state_to_str(skdev->state), skdev->state);
5764 pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
5765 skdev->name, __func__, __LINE__,
5766 skdev->in_flight, skdev->cur_max_queue_depth,
5767 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
5768 pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
5769 skdev->name, __func__, __LINE__,
5770 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
e67f86b3
AB
5771}
5772
5773static void skd_log_skmsg(struct skd_device *skdev,
5774 struct skd_fitmsg_context *skmsg, const char *event)
5775{
2e44b427 5776 pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
5777 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
5778 pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
5779 skdev->name, __func__, __LINE__,
5780 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
5781 skmsg->id, skmsg->length);
e67f86b3
AB
5782}
5783
5784static void skd_log_skreq(struct skd_device *skdev,
5785 struct skd_request_context *skreq, const char *event)
5786{
2e44b427 5787 pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
5788 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
5789 pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
5790 skdev->name, __func__, __LINE__,
5791 skd_skreq_state_to_str(skreq->state), skreq->state,
5792 skreq->id, skreq->fitmsg_id);
5793 pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
5794 skdev->name, __func__, __LINE__,
5795 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
e67f86b3
AB
5796
5797 if (!skd_bio) {
5798 if (skreq->req != NULL) {
5799 struct request *req = skreq->req;
5800 u32 lba = (u32)blk_rq_pos(req);
5801 u32 count = blk_rq_sectors(req);
5802
2e44b427 5803 pr_debug("%s:%s:%d "
5804 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5805 skdev->name, __func__, __LINE__,
5806 req, lba, lba, count, count,
5807 (int)rq_data_dir(req));
e67f86b3 5808 } else
2e44b427 5809 pr_debug("%s:%s:%d req=NULL\n",
5810 skdev->name, __func__, __LINE__);
5811 } else {
5812 if (skreq->bio != NULL) {
5813 struct bio *bio = skreq->bio;
5814 u32 lba = (u32)bio->bi_sector;
5815 u32 count = bio_sectors(bio);
5816
2e44b427 5817 pr_debug("%s:%s:%d "
5818 "bio=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
5819 skdev->name, __func__, __LINE__,
5820 bio, lba, lba, count, count,
5821 (int)bio_data_dir(bio));
e67f86b3 5822 } else
2e44b427 5823 pr_debug("%s:%s:%d bio=NULL\n",
5824 skdev->name, __func__, __LINE__);
5825 }
5826}
5827
5828/*
5829 *****************************************************************************
5830 * MODULE GLUE
5831 *****************************************************************************
5832 */
5833
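/*
 * Module load: sanity-check the module parameters (out-of-range values
 * are reset to their defaults), create the flush-command slab cache,
 * obtain a dynamic block major number and register the PCI driver.
 */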
5834static int __init skd_init(void)
5835{
5836 int rc = 0;
5837
5838 pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5839
5840 switch (skd_isr_type) {
5841 case SKD_IRQ_LEGACY:
5842 case SKD_IRQ_MSI:
5843 case SKD_IRQ_MSIX:
5844 break;
5845 default:
5846 pr_info("skd_isr_type %d invalid, re-set to %d\n",
5847 skd_isr_type, SKD_IRQ_DEFAULT);
5848 skd_isr_type = SKD_IRQ_DEFAULT;
5849 }
5850
5851 skd_flush_slab = kmem_cache_create(SKD_FLUSH_JOB,
5852 sizeof(struct skd_flush_cmd),
5853 0, 0, NULL);
5854
5855 if (!skd_flush_slab) {
5856 pr_err("failed to allocate flush slab.\n");
5857 return -ENOMEM;
5858 }
5859
5860 if (skd_max_queue_depth < 1
5861 || skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5862 pr_info(
5863 "skd_max_queue_depth %d invalid, re-set to %d\n",
5864 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5865 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5866 }
5867
5868 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
5869 pr_info(
5870 "skd_max_req_per_msg %d invalid, re-set to %d\n",
5871 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5872 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5873 }
5874
5875 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
5876 pr_info(
5877 "skd_sg_per_request %d invalid, re-set to %d\n",
5878 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5879 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5880 }
5881
5882 if (skd_dbg_level < 0 || skd_dbg_level > 2) {
5883 pr_info("skd_dbg_level %d invalid, re-set to %d\n",
5884 skd_dbg_level, 0);
5885 skd_dbg_level = 0;
5886 }
5887
5888 if (skd_isr_comp_limit < 0) {
5889 pr_info("skd_isr_comp_limit %d invalid, set to %d\n",
5890 skd_isr_comp_limit, 0);
5891 skd_isr_comp_limit = 0;
5892 }
5893
5894 if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
5895 pr_info("skd_max_pass_thru %d invalid, re-set to %d\n",
5896 skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5897 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5898 }
5899
/* Obtain major device number. */
rc = register_blkdev(0, DRV_NAME);
if (rc < 0)
goto err_destroy_flush_slab;

skd_major = rc;

rc = pci_register_driver(&skd_driver);
if (rc < 0)
goto err_unregister_blkdev;

return 0;

err_unregister_blkdev:
unregister_blkdev(skd_major, DRV_NAME);
err_destroy_flush_slab:
kmem_cache_destroy(skd_flush_slab);
return rc;
}
5910
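/*
 * Module unload: unregister the PCI driver and the block major, then
 * destroy the flush-command slab cache.
 */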
5911static void __exit skd_exit(void)
5912{
5913 pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5914
5915 pci_unregister_driver(&skd_driver);
5916 unregister_blkdev(skd_major, DRV_NAME);
5917
5918 kmem_cache_destroy(skd_flush_slab);
5919}
5920
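/*
 * Wrap an opaque command pointer in a skd_flush_cmd tracking item
 * (allocated atomically from skd_flush_slab) and append it to
 * skdev->flush_list. Returns 0 on success or -ENOMEM if the tracking
 * item cannot be allocated.
 */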
5921static int
5922skd_flush_cmd_enqueue(struct skd_device *skdev, void *cmd)
5923{
5924 struct skd_flush_cmd *item;
5925
5926 item = kmem_cache_zalloc(skd_flush_slab, GFP_ATOMIC);
5927 if (!item) {
5928 pr_err("skd_flush_cmd_enqueue: Failed to allocated item.\n");
5929 return -ENOMEM;
5930 }
5931
5932 item->cmd = cmd;
5933 list_add_tail(&item->flist, &skdev->flush_list);
5934 return 0;
5935}
5936
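/*
 * Detach the oldest entry from skdev->flush_list, free the tracking item
 * back to skd_flush_slab and return the stored command pointer. Assumes
 * the list is not empty; callers must check list_empty() first.
 */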
5937static void *
5938skd_flush_cmd_dequeue(struct skd_device *skdev)
5939{
5940 void *cmd;
5941 struct skd_flush_cmd *item;
5942
5943 item = list_entry(skdev->flush_list.next, struct skd_flush_cmd, flist);
5944 list_del_init(&item->flist);
5945 cmd = item->cmd;
5946 kmem_cache_free(skd_flush_slab, item);
5947 return cmd;
5948}
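/*
 * A minimal sketch of how the two helpers above pair up, assuming the
 * flush flow implied by SKD_FLUSH_ZERO_SIZE_FIRST/SKD_FLUSH_DATA_SECOND
 * (illustrative only; the real call sites are elsewhere in this file):
 *
 *	// Park the original command, then issue a zero-size flush first.
 *	if (skd_flush_cmd_enqueue(skdev, (void *)req))
 *		return -ENOMEM;
 *
 *	// Once the zero-size flush completes, resubmit the parked
 *	// command(s) as the data portion.
 *	while (!list_empty(&skdev->flush_list)) {
 *		void *cmd = skd_flush_cmd_dequeue(skdev);
 *		// ... resubmit cmd ...
 *	}
 */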
5949
5950module_init(skd_init);
5951module_exit(skd_exit);